136 #define RFS4_MINLEN_RDDIR4 (4 + NFS4_VERIFIER_SIZE + 4 + RFS4_MINLEN_ENTRY4 + 4)
137 #define RFS4_MINLEN_RDDIR_BUF \
138 (DIRENT64_RECLEN(1) + DIRENT64_RECLEN(2) + DIRENT64_RECLEN(MAXNAMELEN))
139
140 /*
141 * It would be better to pad to 4 bytes since that's what XDR would do,
142 * but the dirents UFS gives us are already padded to 8, so just take
143 * what we're given. Dircount is only a hint anyway. Currently the
144 * Solaris kernel is ASCII only, so there's no point in calling the
145 * UTF8 functions.
146 *
147 * dirent64: name padded to provide 8-byte struct alignment
148 * d_ino(8) + d_off(8) + d_reclen(2) + d_name(namelen + null(1) + pad)
149 *
150 * cookie: uint64_t + utf8namelen: uint_t + utf8name padded to 8 bytes
151 *
152 */
153 #define DIRENT64_TO_DIRCOUNT(dp) \
154 (3 * BYTES_PER_XDR_UNIT + DIRENT64_NAMELEN((dp)->d_reclen))
155
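/*
 * A minimal sketch of how a READDIR handler could spend the client's
 * dircount hint while walking a dirent64 buffer as returned by
 * VOP_READDIR, using the DIRENT64_TO_DIRCOUNT cost above.  The helper
 * name and its callers are hypothetical.
 */
static uint_t
rddir4_entries_within_dircount(dirent64_t *buf, size_t buflen, uint_t dircount)
{
	dirent64_t *dp;
	uint_t used = 0;
	uint_t nents = 0;

	for (dp = buf; (caddr_t)dp < (caddr_t)buf + buflen;
	    dp = (dirent64_t *)((caddr_t)dp + dp->d_reclen)) {
		used += DIRENT64_TO_DIRCOUNT(dp);	/* XDR cost of this entry */
		if (used > dircount)
			break;				/* hint exhausted */
		nents++;
	}
	return (nents);
}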
156
157 static sysid_t lockt_sysid; /* dummy sysid for all LOCKT calls */
158
159 u_longlong_t nfs4_srv_caller_id;
160 uint_t nfs4_srv_vkey = 0;
161
162 void rfs4_init_compound_state(struct compound_state *);
163
164 static void nullfree(caddr_t);
165 static void rfs4_op_inval(nfs_argop4 *, nfs_resop4 *, struct svc_req *,
166 struct compound_state *);
167 static void rfs4_op_access(nfs_argop4 *, nfs_resop4 *, struct svc_req *,
168 struct compound_state *);
169 static void rfs4_op_close(nfs_argop4 *, nfs_resop4 *, struct svc_req *,
170 struct compound_state *);
171 static void rfs4_op_commit(nfs_argop4 *, nfs_resop4 *, struct svc_req *,
172 struct compound_state *);
173 static void rfs4_op_create(nfs_argop4 *, nfs_resop4 *, struct svc_req *,
174 struct compound_state *);
175 static void rfs4_op_create_free(nfs_resop4 *resop);
483 VOPNAME_WRITE, { .femop_write = deleg_rd_write },
484 VOPNAME_SETATTR, { .femop_setattr = deleg_rd_setattr },
485 VOPNAME_RWLOCK, { .femop_rwlock = deleg_rd_rwlock },
486 VOPNAME_SPACE, { .femop_space = deleg_rd_space },
487 VOPNAME_SETSECATTR, { .femop_setsecattr = deleg_rd_setsecattr },
488 VOPNAME_VNEVENT, { .femop_vnevent = deleg_rd_vnevent },
489 NULL, NULL
490 };
491 static const fs_operation_def_t nfs4_wr_deleg_tmpl[] = {
492 VOPNAME_OPEN, { .femop_open = deleg_wr_open },
493 VOPNAME_READ, { .femop_read = deleg_wr_read },
494 VOPNAME_WRITE, { .femop_write = deleg_wr_write },
495 VOPNAME_SETATTR, { .femop_setattr = deleg_wr_setattr },
496 VOPNAME_RWLOCK, { .femop_rwlock = deleg_wr_rwlock },
497 VOPNAME_SPACE, { .femop_space = deleg_wr_space },
498 VOPNAME_SETSECATTR, { .femop_setsecattr = deleg_wr_setsecattr },
499 VOPNAME_VNEVENT, { .femop_vnevent = deleg_wr_vnevent },
500 NULL, NULL
501 };
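
/*
 * fem_create() turns these templates into the deleg_rdops/deleg_wrops
 * monitors in rfs4_srvrinit() below; those monitors are presumably
 * installed on a delegated file's vnode (e.g. with fem_install()) when
 * a delegation is granted, so that conflicting vnode operations can
 * trigger a delegation recall.
 */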
502
503 nfs4_srv_t *
504 nfs4_get_srv(void)
505 {
506 nfs_globals_t *ng = zone_getspecific(nfssrv_zone_key, curzone);
507 nfs4_srv_t *srv = ng->nfs4_srv;
508 ASSERT(srv != NULL);
509 return (srv);
510 }
511
512 void
513 rfs4_srv_zone_init(nfs_globals_t *ng)
514 {
515 nfs4_srv_t *nsrv4;
516 timespec32_t verf;
517
518 nsrv4 = kmem_zalloc(sizeof (*nsrv4), KM_SLEEP);
519
520 /*
521 * The following algorithm attempts to find a unique verifier
522 * to be used as the write verifier returned from the server
523 * to the client. It is important that this verifier change
524 * whenever the server reboots. Of secondary importance, the
525 * verifier should also be unique between two
526 * different servers.
527 *
528 * Thus, an attempt is made to use the system hostid and the
529 * current time in seconds when the nfssrv kernel module is
530 * loaded. It is assumed that an NFS server will not be able
531 * to boot and then to reboot in less than a second. If the
532 * hostid has not been set, then the current high resolution
533 * time is used. This will ensure different verifiers each
534 * time the server reboots and minimize the chances that two
539 if (verf.tv_sec != 0) {
540 verf.tv_nsec = gethrestime_sec();
541 } else {
542 timespec_t tverf;
543
544 gethrestime(&tverf);
545 verf.tv_sec = (time_t)tverf.tv_sec;
546 verf.tv_nsec = tverf.tv_nsec;
547 }
548 nsrv4->write4verf = *(uint64_t *)&verf;
549
550 /* Used to manage create/destroy of server state */
551 nsrv4->nfs4_server_state = NULL;
552 nsrv4->nfs4_cur_servinst = NULL;
553 nsrv4->nfs4_deleg_policy = SRV_NEVER_DELEGATE;
554 mutex_init(&nsrv4->deleg_lock, NULL, MUTEX_DEFAULT, NULL);
555 mutex_init(&nsrv4->state_lock, NULL, MUTEX_DEFAULT, NULL);
556 mutex_init(&nsrv4->servinst_lock, NULL, MUTEX_DEFAULT, NULL);
557 rw_init(&nsrv4->deleg_policy_lock, NULL, RW_DEFAULT, NULL);
558
559 ng->nfs4_srv = nsrv4;
560 }
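
/*
 * A minimal sketch of the write verifier layout: the 8-byte
 * timespec32_t (two 32-bit fields) is reinterpreted as a single
 * 64-bit value, which is what the *(uint64_t *)&verf cast above does.
 * The union-based equivalent below is hypothetical and only
 * illustrates the same construction without the pointer cast.
 */
static uint64_t
rfs4_verf_to_u64(timespec32_t ts)
{
	union {
		timespec32_t	ts;
		uint64_t	u64;
	} v;

	v.ts = ts;
	return (v.u64);
}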
561
562 void
563 rfs4_srv_zone_fini(nfs_globals_t *ng)
564 {
565 nfs4_srv_t *nsrv4 = ng->nfs4_srv;
566
567 ng->nfs4_srv = NULL;
568
569 mutex_destroy(&nsrv4->deleg_lock);
570 mutex_destroy(&nsrv4->state_lock);
571 mutex_destroy(&nsrv4->servinst_lock);
572 rw_destroy(&nsrv4->deleg_policy_lock);
573
574 kmem_free(nsrv4, sizeof (*nsrv4));
575 }
576
577 void
578 rfs4_srvrinit(void)
579 {
580 extern void rfs4_attr_init();
581
582 rfs4_attr_init();
583
584 if (fem_create("deleg_rdops", nfs4_rd_deleg_tmpl, &deleg_rdops) != 0) {
585 rfs4_disable_delegation();
586 } else if (fem_create("deleg_wrops", nfs4_wr_deleg_tmpl,
587 &deleg_wrops) != 0) {
588 rfs4_disable_delegation();
589 fem_free(deleg_rdops);
590 }
591
592 nfs4_srv_caller_id = fs_new_caller_id();
593 lockt_sysid = lm_alloc_sysidt();
594 vsd_create(&nfs4_srv_vkey, NULL);
595 rfs4_state_g_init();
596 }
597
598 void
599 rfs4_srvrfini(void)
600 {
601 if (lockt_sysid != LM_NOSYSID) {
602 lm_free_sysidt(lockt_sysid);
603 lockt_sysid = LM_NOSYSID;
604 }
605
606 rfs4_state_g_fini();
607
608 fem_free(deleg_rdops);
609 fem_free(deleg_wrops);
610 }
611
612 void
613 rfs4_do_server_start(int server_upordown,
614 int srv_delegation, int cluster_booted)
615 {
616 nfs4_srv_t *nsrv4 = nfs4_get_srv();
617
618 /* Is this a warm start? */
619 if (server_upordown == NFS_SERVER_QUIESCED) {
620 cmn_err(CE_NOTE, "nfs4_srv: "
621 "server was previously quiesced; "
622 "existing NFSv4 state will be re-used");
623
624 /*
625 * HA-NFSv4: this is also the signal
626 * that a Resource Group failover has
627 * occurred.
628 */
629 if (cluster_booted)
630 hanfsv4_failover(nsrv4);
631 } else {
632 /* Cold start */
633 nsrv4->rfs4_start_time = 0;
634 rfs4_state_zone_init(nsrv4);
635 nsrv4->nfs4_drc = rfs4_init_drc(nfs4_drc_max,
636 nfs4_drc_hash);
1503 resp->status = NFS4ERR_ISDIR;
1504 else
1505 resp->status = NFS4ERR_INVAL;
1506 *cs->statusp = resp->status;
1507 goto out;
1508 }
1509
1510 if (crgetuid(cr) != va.va_uid &&
1511 (error = VOP_ACCESS(vp, VWRITE, 0, cs->cr, NULL))) {
1512 *cs->statusp = resp->status = puterrno4(error);
1513 goto out;
1514 }
1515
1516 error = VOP_FSYNC(vp, FSYNC, cr, NULL);
1517
1518 if (error) {
1519 *cs->statusp = resp->status = puterrno4(error);
1520 goto out;
1521 }
1522
1523 nsrv4 = nfs4_get_srv();
1524 *cs->statusp = resp->status = NFS4_OK;
1525 resp->writeverf = nsrv4->write4verf;
1526 out:
1527 DTRACE_NFSV4_2(op__commit__done, struct compound_state *, cs,
1528 COMMIT4res *, resp);
1529 }
1530
1531 /*
1532 * do_rfs4_op_mknod is called from rfs4_op_create after all initial verification
1533 * has completed. It does the NFSv4 create for special files.
1534 */
1535 /* ARGSUSED */
1536 static vnode_t *
1537 do_rfs4_op_mknod(CREATE4args *args, CREATE4res *resp, struct svc_req *req,
1538 struct compound_state *cs, vattr_t *vap, char *nm)
1539 {
1540 int error;
1541 cred_t *cr = cs->cr;
1542 vnode_t *dvp = cs->vp;
1543 vnode_t *vp = NULL;
5650 goto out;
5651 }
5652
5653 if (vp->v_type != VREG) {
5654 *cs->statusp = resp->status =
5655 ((vp->v_type == VDIR) ? NFS4ERR_ISDIR : NFS4ERR_INVAL);
5656 goto out;
5657 }
5658
5659 if (crgetuid(cr) != bva.va_uid &&
5660 (error = VOP_ACCESS(vp, VWRITE, 0, cr, &ct))) {
5661 *cs->statusp = resp->status = puterrno4(error);
5662 goto out;
5663 }
5664
5665 if (MANDLOCK(vp, bva.va_mode)) {
5666 *cs->statusp = resp->status = NFS4ERR_ACCESS;
5667 goto out;
5668 }
5669
5670 nsrv4 = nfs4_get_srv();
5671 if (args->data_len == 0) {
5672 *cs->statusp = resp->status = NFS4_OK;
5673 resp->count = 0;
5674 resp->committed = args->stable;
5675 resp->writeverf = nsrv4->write4verf;
5676 goto out;
5677 }
5678
5679 if (args->mblk != NULL) {
5680 mblk_t *m;
5681 uint_t bytes, round_len;
5682
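/*
 * Count the mblks (one iovec each) that cover the write data.  The
 * length is rounded up to a 4-byte XDR unit because the opaque data
 * in the request is padded on the wire.
 */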
5683 iovcnt = 0;
5684 bytes = 0;
5685 round_len = roundup(args->data_len, BYTES_PER_XDR_UNIT);
5686 for (m = args->mblk;
5687 m != NULL && bytes < round_len;
5688 m = m->b_cont) {
5689 iovcnt++;
5690 bytes += MBLKL(m);
5830
5831 cr = crget();
5832 ASSERT(cr != NULL);
5833
5834 if (sec_svc_getcred(req, cr, &cs.principal, &cs.nfsflavor) == 0) {
5835 DTRACE_NFSV4_2(compound__start, struct compound_state *,
5836 &cs, COMPOUND4args *, args);
5837 crfree(cr);
5838 DTRACE_NFSV4_2(compound__done, struct compound_state *,
5839 &cs, COMPOUND4res *, resp);
5840 svcerr_badcred(req->rq_xprt);
5841 if (rv != NULL)
5842 *rv = 1;
5843 return;
5844 }
5845 resp->array_len = args->array_len;
5846 resp->array = kmem_zalloc(args->array_len * sizeof (nfs_resop4),
5847 KM_SLEEP);
5848
5849 cs.basecr = cr;
5850 nsrv4 = nfs4_get_srv();
5851
5852 DTRACE_NFSV4_2(compound__start, struct compound_state *, &cs,
5853 COMPOUND4args *, args);
5854
5855 /*
5856 * For now, NFS4 compound processing must be protected by
5857 * exported_lock because it can access more than one exportinfo
5858 * per compound and share/unshare can now change multiple
5859 * exinfo structs. The NFS2/3 code only refs 1 exportinfo
5860 * per proc (excluding public exinfo), and exi_count design
5861 * is sufficient to protect concurrent execution of NFS2/3
5862 * ops along with unexport. This lock will be removed as
5863 * part of the NFSv4 phase 2 namespace redesign work.
5864 */
5865 rw_enter(&ne->exported_lock, RW_READER);
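/*
 * The matching rw_exit() presumably happens once all ops in the
 * compound have been processed; export/unexport paths would take
 * exported_lock as RW_WRITER, which is what this reader hold excludes.
 */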
5866
5867 /*
5868 * If this is the first compound we've seen, we need to start all
5869 * new instances' grace periods.
5870 */
6642 cs->mandlock = TRUE;
6643
6644 if (VOP_GETATTR(vp, &cva, 0, cs->cr, NULL) == 0) {
6645 cs->mandlock = MANDLOCK(cs->vp, cva.va_mode);
6646
6647 /*
6648 * Truncate the file if necessary; this would be
6649 * the case for create over an existing file.
6650 */
6651
6652 if (trunc) {
6653 int in_crit = 0;
6654 rfs4_file_t *fp;
6655 nfs4_srv_t *nsrv4;
6656 bool_t create = FALSE;
6657
6658 /*
6659 * We are writing over an existing file.
6660 * Check to see if we need to recall a delegation.
6661 */
6662 nsrv4 = nfs4_get_srv();
6663 rfs4_hold_deleg_policy(nsrv4);
6664 if ((fp = rfs4_findfile(vp, NULL, &create)) != NULL) {
6665 if (rfs4_check_delegated_byfp(FWRITE, fp,
6666 (reqsize == 0), FALSE, FALSE, &clientid)) {
6667 rfs4_file_rele(fp);
6668 rfs4_rele_deleg_policy(nsrv4);
6669 VN_RELE(vp);
6670 *attrset = 0;
6671 return (NFS4ERR_DELAY);
6672 }
6673 rfs4_file_rele(fp);
6674 }
6675 rfs4_rele_deleg_policy(nsrv4);
6676
6677 if (nbl_need_check(vp)) {
6678 in_crit = 1;
6679
6680 ASSERT(reqsize == 0);
6681
6682 nbl_start_crit(vp, RW_READER);
8219 DTRACE_NFSV4_2(op__setclientid__done, struct compound_state *, cs,
8220 SETCLIENTID4res *, res);
8221 }
8222
8223 /*ARGSUSED*/
8224 void
8225 rfs4_op_setclientid_confirm(nfs_argop4 *argop, nfs_resop4 *resop,
8226 struct svc_req *req, struct compound_state *cs)
8227 {
8228 SETCLIENTID_CONFIRM4args *args =
8229 &argop->nfs_argop4_u.opsetclientid_confirm;
8230 SETCLIENTID_CONFIRM4res *res =
8231 &resop->nfs_resop4_u.opsetclientid_confirm;
8232 rfs4_client_t *cp, *cptoclose = NULL;
8233 nfs4_srv_t *nsrv4;
8234
8235 DTRACE_NFSV4_2(op__setclientid__confirm__start,
8236 struct compound_state *, cs,
8237 SETCLIENTID_CONFIRM4args *, args);
8238
8239 nsrv4 = nfs4_get_srv();
8240 *cs->statusp = res->status = NFS4_OK;
8241
8242 cp = rfs4_findclient_by_id(args->clientid, TRUE);
8243
8244 if (cp == NULL) {
8245 *cs->statusp = res->status =
8246 rfs4_check_clientid(&args->clientid, 1);
8247 goto out;
8248 }
8249
8250 if (!creds_ok(cp, req, cs)) {
8251 *cs->statusp = res->status = NFS4ERR_CLID_INUSE;
8252 rfs4_client_rele(cp);
8253 goto out;
8254 }
8255
8256 /* If the verifier doesn't match, the record doesn't match */
8257 if (cp->rc_confirm_verf != args->setclientid_confirm) {
8258 *cs->statusp = res->status = NFS4ERR_STALE_CLIENTID;
8259 rfs4_client_rele(cp);