
        }

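        /*
         * Fetch just the mode; it is all the mandatory-locking check
         * below needs.
         */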
        va.va_mask = AT_MODE;
        error = VOP_GETATTR(vp, &va, 0, cs->cr, NULL);
        if (error) {
                *cs->statusp = resp->status = puterrno4(error);
                goto out;
        }

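        /*
         * Refuse the operation with NFS4ERR_ACCESS if the object has
         * mandatory locking in force.
         */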
        if (MANDLOCK(vp, va.va_mode)) {
                *cs->statusp = resp->status = NFS4ERR_ACCESS;
                goto out;
        }

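        /* Room for a MAXPATHLEN link plus the terminating NUL. */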
        data = kmem_alloc(MAXPATHLEN + 1, KM_SLEEP);

        if (is_referral) {
                char *s;
                size_t strsz;
                kstat_named_t *stat =
                    cs->exi->exi_ne->ne_globals->svstat[NFS_V4];

                /* Get an artificial symlink based on a referral */
                s = build_symlink(vp, cs->cr, &strsz);
                stat[NFS_REFERLINKS].value.ui64++;
                DTRACE_PROBE2(nfs4serv__func__referral__reflink,
                    vnode_t *, vp, char *, s);
                if (s == NULL)
                        error = EINVAL;
                else {
                        error = 0;
                        (void) strlcpy(data, s, MAXPATHLEN + 1);
                        kmem_free(s, strsz);
                }

        } else {

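                /*
                 * Describe the data buffer with a one-segment kernel
                 * uio for the VOP_READLINK() call that follows.
                 */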
                iov.iov_base = data;
                iov.iov_len = MAXPATHLEN;
                uio.uio_iov = &iov;
                uio.uio_iovcnt = 1;
                uio.uio_segflg = UIO_SYSSPACE;
                uio.uio_extflg = UIO_COPY_CACHED;
                uio.uio_loffset = 0;
                uio.uio_resid = MAXPATHLEN;
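
/* ... */

        /*
         * Hold the export table read lock across the whole compound so
         * the set of exported file systems cannot change mid-request.
         */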
        rw_enter(&ne->exported_lock, RW_READER);

        /*
         * If this is the first compound we've seen, we need to start all
         * new instances' grace periods.
         */
        if (nsrv4->seen_first_compound == 0) {
                rfs4_grace_start_new(nsrv4);
                /*
                 * This must be set after rfs4_grace_start_new(); otherwise
                 * another thread could get past here before the new grace
                 * periods have been started.
                 */
                nsrv4->seen_first_compound = 1;
        }

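        /*
         * Execute the compound's operations in order; the loop stops as
         * soon as an operation fails (cs.cont goes FALSE).
         */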
        for (i = 0; i < args->array_len && cs.cont; i++) {
                nfs_argop4 *argop;
                nfs_resop4 *resop;
                uint_t op;
                kstat_named_t *stat = ne->ne_globals->rfsproccnt[NFS_V4];

                argop = &args->array[i];
                resop = &resp->array[i];
                resop->resop = argop->argop;
                op = (uint_t)resop->resop;

                if (op < rfsv4disp_cnt) {
                        /*
                         * Count the individual ops here; NULL and COMPOUND
                         * are counted in common_dispatch()
                         */
                        stat[op].value.ui64++;

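                        /*
                         * Dispatch through the per-op table; the handler
                         * writes its result into resop and its status
                         * into *cs.statusp.
                         */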
                        NFS4_DEBUG(rfs4_debug > 1,
                            (CE_NOTE, "Executing %s", rfs4_op_string[op]));
                        (*rfsv4disptab[op].dis_proc)(argop, resop, req, &cs);
                        NFS4_DEBUG(rfs4_debug > 1, (CE_NOTE, "%s returned %d",
                            rfs4_op_string[op], *cs.statusp));
                        if (*cs.statusp != NFS4_OK)
                                cs.cont = FALSE;
                } else {
                        /*
                         * This is effectively dead code, since the XDR
                         * layer will already have returned BADXDR if the
                         * op did not decode to a legal value.  It is kept
                         * for the day when the XDR code no longer
                         * verifies v4 opcodes.
                         */
                        op = OP_ILLEGAL;
                        stat[OP_ILLEGAL_IDX].value.ui64++;

                        rfs4_op_illegal(argop, resop, req, &cs);
                        cs.cont = FALSE;
                }

                /*
                 * If we are stopping before the last requested op,
                 * shrink the results array down to the ops that
                 * actually ran.
                 */
                if ((i + 1) < args->array_len && !cs.cont) {
                        nfs_resop4 *new_res = kmem_alloc(
                            (i + 1) * sizeof (nfs_resop4), KM_SLEEP);
                        bcopy(resp->array,
                            new_res, (i + 1) * sizeof (nfs_resop4));
                        kmem_free(resp->array,
                            args->array_len * sizeof (nfs_resop4));

                        resp->array_len = i + 1;
                        resp->array = new_res;
                }