/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009 Rick Macklem, University of Guelph
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * These functions implement the client side state handling for NFSv4.
 * NFSv4 state handling:
 * - A lockowner is used to determine lock contention, so it
 *   corresponds directly to a Posix pid. (1 to 1 mapping)
 * - The correct granularity of an OpenOwner is not nearly so
 *   obvious. An OpenOwner does the following:
 *   - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
 *   - is used to check for Open/Share contention (not applicable to
 *     this client, since all Opens are Deny_None)
 *   As such, I considered both extremes.
 *   1 OpenOwner per ClientID - Simple to manage, but fully serializes
 *     all Open, Close and Lock (with a new lockowner) Ops.
 *   1 OpenOwner for each Open - This one results in an OpenConfirm for
 *     every Open, for most servers.
 *   So, I chose to use the same mapping as I did for LockOwners.
 *   The main concern here is that you can end up with multiple Opens
 *   for the same File Handle, but on different OpenOwners (opens
 *   inherited from parents, grandparents...) and you do not know
 *   which of these the vnodeop close applies to. This is handled by
 *   delaying the Close Op(s) until all of the Opens have been closed.
 *   (It is not yet obvious if this is the correct granularity.)
 * - How the code handles serialization:
 *   - For the ClientId, it uses an exclusive lock while getting its
 *     SetClientId and during recovery. Otherwise, it uses a shared
 *     lock via a reference count.
 *   - For the rest of the data structures, it uses an SMP mutex
 *     (once the nfs client is SMP safe) and doesn't sleep while
 *     manipulating the linked lists.
 *   - The serialization of Open/Close/Lock/LockU falls out in the
 *     "wash", since OpenOwners and LockOwners are both mapped from
 *     Posix pid. In other words, there is only one Posix pid using
 *     any given owner, so that owner is serialized. (If you change
 *     the granularity of the OpenOwner, then code must be added to
 *     serialize Ops on the OpenOwner.)
 * - When to get rid of OpenOwners and LockOwners.
 *   - The function nfscl_cleanup_common() is executed after a process exits.
 *     It goes through the client list looking for all Open and Lock Owners.
 *     When one is found, it is marked "defunct" or, in the case of
 *     an OpenOwner without any Opens, freed.
 *     The renew thread scans for defunct Owners and gets rid of them,
 *     if it can. The LockOwners will also be deleted when the
 *     associated Open is closed.
 *   - If the LockU or Close Op(s) fail during close in a way
 *     that could be recovered upon retry, they are relinked to the
 *     ClientId's defunct open list and retried by the renew thread
 *     until they succeed or an unmount/recovery occurs.
 *     (Since we are done with them, they do not need to be recovered.)
 */
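
/*
 * Example (an illustrative sketch, not compiled code): the pid to owner
 * name mapping described above. Both OpenOwners and LockOwners are
 * named by filling an NFSV4CL_LOCKNAMELEN byte buffer from the Posix
 * pid via nfscl_filllockowner(), so one process maps to one owner:
 *
 *	u_int8_t own[NFSV4CL_LOCKNAMELEN];
 *
 *	nfscl_filllockowner(p->td_proc, own, F_POSIX);
 *	(own[] now names this process's OpenOwner/LockOwner and is
 *	 compared with NFSBCMP() when searching the owner lists)
 */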

#include <fs/nfs/nfsport.h>

/*
 * Global variables
 */
extern struct nfsstatsv1 nfsstatsv1;
extern struct nfsreqhead nfsd_reqq;
extern u_int32_t newnfs_false, newnfs_true;
extern int nfscl_debuglevel;
extern int nfscl_enablecallb;
extern int nfs_numnfscbd;
NFSREQSPINLOCK;
NFSCLSTATEMUTEX;
int nfscl_inited = 0;
struct nfsclhead nfsclhead;	/* Head of clientid list */
int nfscl_deleghighwater = NFSCLDELEGHIGHWATER;
int nfscl_layouthighwater = NFSCLLAYOUTHIGHWATER;

static int nfscl_delegcnt = 0;
static int nfscl_layoutcnt = 0;
static int nfscl_getopen(struct nfsclownerhead *, u_int8_t *, int, u_int8_t *,
    u_int8_t *, u_int32_t, struct nfscllockowner **, struct nfsclopen **);
static void nfscl_clrelease(struct nfsclclient *);
static void nfscl_cleanclient(struct nfsclclient *);
static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
    struct ucred *, NFSPROC_T *);
static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
    struct nfsmount *, struct ucred *, NFSPROC_T *);
static void nfscl_recover(struct nfsclclient *, bool *, struct ucred *,
    NFSPROC_T *);
static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
    struct nfscllock *, int);
static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
    struct nfscllock **, int);
static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *);
static u_int32_t nfscl_nextcbident(void);
static mount_t nfscl_getmnt(int, uint8_t *, u_int32_t, struct nfsclclient **);
static struct nfsclclient *nfscl_getclnt(u_int32_t);
static struct nfsclclient *nfscl_getclntsess(uint8_t *);
static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
    int);
static void nfscl_retoncloselayout(vnode_t, struct nfsclclient *, uint8_t *,
    int, struct nfsclrecalllayout **);
static void nfscl_reldevinfo_locked(struct nfscldevinfo *);
static struct nfscllayout *nfscl_findlayout(struct nfsclclient *, u_int8_t *,
    int);
static struct nfscldevinfo *nfscl_finddevinfo(struct nfsclclient *, uint8_t *);
static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
    u_int8_t *, struct nfscllock **);
static void nfscl_freealllocks(struct nfscllockownerhead *, int);
static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
    struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
    struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
    struct nfsclopen **, u_int8_t *, u_int8_t *, int, struct ucred *, int *);
static int nfscl_moveopen(vnode_t , struct nfsclclient *,
    struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
    struct nfscldeleg *, struct ucred *, NFSPROC_T *);
static void nfscl_totalrecall(struct nfsclclient *);
static int nfscl_relock(vnode_t , struct nfsclclient *, struct nfsmount *,
    struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
static int nfscl_tryopen(struct nfsmount *, vnode_t , u_int8_t *, int,
    u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
    struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
static int nfscl_trylock(struct nfsmount *, vnode_t , u_int8_t *,
    int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
    struct ucred *, NFSPROC_T *);
static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
    struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *);
static int nfscl_errmap(struct nfsrv_descript *, u_int32_t);
static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
    struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int);
static void nfscl_freeopenowner(struct nfsclowner *, int);
static void nfscl_cleandeleg(struct nfscldeleg *);
static int nfscl_trydelegreturn(struct nfscldeleg *, struct ucred *,
    struct nfsmount *, NFSPROC_T *);
static void nfscl_emptylockowner(struct nfscllockowner *,
    struct nfscllockownerfhhead *);
static void nfscl_mergeflayouts(struct nfsclflayouthead *,
    struct nfsclflayouthead *);
static int nfscl_layoutrecall(int, struct nfscllayout *, uint32_t, uint64_t,
    uint64_t, uint32_t, uint32_t, uint32_t, char *, struct nfsclrecalllayout *);
static int nfscl_seq(uint32_t, uint32_t);
static void nfscl_layoutreturn(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);
static void nfscl_dolayoutcommit(struct nfsmount *, struct nfscllayout *,
    struct ucred *, NFSPROC_T *);

static short nfscberr_null[] = {
	0,
	0,
};

static short nfscberr_getattr[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short nfscberr_recall[] = {
	NFSERR_RESOURCE,
	NFSERR_BADHANDLE,
	NFSERR_BADSTATEID,
	NFSERR_BADXDR,
	NFSERR_RESOURCE,
	NFSERR_SERVERFAULT,
	0,
};

static short *nfscl_cberrmap[] = {
	nfscberr_null,
	nfscberr_null,
	nfscberr_null,
	nfscberr_getattr,
	nfscberr_recall
};

#define	NETFAMILY(clp) \
		(((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)

/*
 * Called for an open operation.
 * If the nfhp argument is NULL, just get an openowner.
 */
int
nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
    struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
    struct nfsclopen **opp, int *newonep, int *retp, int lockit)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp, *nowp;
	struct nfsclopen *op = NULL, *nop = NULL;
	struct nfscldeleg *dp;
	struct nfsclownerhead *ohp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret;

	if (newonep != NULL)
		*newonep = 0;
	if (opp != NULL)
		*opp = NULL;
	if (owpp != NULL)
		*owpp = NULL;

	/*
	 * Might need one or both of these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nowp = malloc(sizeof (struct nfsclowner),
	    M_NFSCLOWNER, M_WAITOK);
	if (nfhp != NULL)
		nop = malloc(sizeof (struct nfsclopen) +
		    fhlen - 1, M_NFSCLOPEN, M_WAITOK);
	ret = nfscl_getcl(vp->v_mount, cred, p, 1, &clp);
	if (ret != 0) {
		free(nowp, M_NFSCLOWNER);
		if (nop != NULL)
			free(nop, M_NFSCLOPEN);
		return (ret);
	}

	/*
	 * Get the Open iff it already exists.
	 * If none found, add the new one or return error, depending upon
	 * "create".
	 */
	NFSLOCKCLSTATE();
	dp = NULL;
	/* First check the delegation list */
	if (nfhp != NULL && usedeleg) {
		LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
			if (dp->nfsdl_fhlen == fhlen &&
			    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
				if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
				    (dp->nfsdl_flags & NFSCLDL_WRITE))
					break;
				dp = NULL;
				break;
			}
		}
	}

	if (dp != NULL) {
		nfscl_filllockowner(p->td_proc, own, F_POSIX);
		ohp = &dp->nfsdl_owner;
	} else {
		/* For NFSv4.1 and this option, use a single open_owner. */
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
			nfscl_filllockowner(NULL, own, F_POSIX);
		else
			nfscl_filllockowner(p->td_proc, own, F_POSIX);
		ohp = &clp->nfsc_owner;
	}
	/* Now, search for an openowner */
	LIST_FOREACH(owp, ohp, nfsow_list) {
		if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
			break;
	}

	/*
	 * Create a new open, as required.
	 */
	nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
	    cred, newonep);

	/*
	 * Now, check the mode on the open and return the appropriate
	 * value.
	 */
	if (retp != NULL) {
		if (nfhp != NULL && dp != NULL && nop == NULL)
			/* new local open on delegation */
			*retp = NFSCLOPEN_SETCRED;
		else
			*retp = NFSCLOPEN_OK;
	}
	if (op != NULL && (amode & ~(op->nfso_mode))) {
		op->nfso_mode |= amode;
		if (retp != NULL && dp == NULL)
			*retp = NFSCLOPEN_DOOPEN;
	}

	/*
	 * Serialize modifications to the open owner for multiple threads
	 * within the same process using a read/write sleep lock.
	 * For NFSv4.1 and a single OpenOwner, allow concurrent open operations
	 * by acquiring a shared lock. The close operations still use an
	 * exclusive lock for this case.
	 */
	if (lockit != 0) {
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount))) {
			/*
			 * Get a shared lock on the OpenOwner, but first
			 * wait for any pending exclusive lock, so that the
			 * exclusive locker gets priority.
			 */
			nfsv4_lock(&owp->nfsow_rwlock, 0, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
			nfsv4_getref(&owp->nfsow_rwlock, NULL,
			    NFSCLSTATEMUTEXPTR, NULL);
		} else
			nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
	}
	NFSUNLOCKCLSTATE();
	if (nowp != NULL)
		free(nowp, M_NFSCLOWNER);
	if (nop != NULL)
		free(nop, M_NFSCLOPEN);
	if (owpp != NULL)
		*owpp = owp;
	if (opp != NULL)
		*opp = op;
	return (0);
}
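
/*
 * Example (a minimal usage sketch, assuming "vp" is the vnode being
 * opened, "nfhp"/"fhlen" its file handle and "cred"/"p" the caller's
 * credentials and thread; error handling is elided):
 *
 *	struct nfsclowner *owp;
 *	struct nfsclopen *op;
 *	int newone, ret;
 *
 *	error = nfscl_open(vp, nfhp, fhlen, NFSV4OPEN_ACCESSREAD, 1,
 *	    cred, p, &owp, &op, &newone, &ret, 1);
 *
 * On success, "ret" tells the caller what to do next, e.g.
 * NFSCLOPEN_DOOPEN means an Open RPC is needed for the added mode
 * bits. The caller later drops its hold on the owner via
 * nfscl_openrelease() or nfscl_ownerrelease().
 */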

/*
 * Create a new open, as required.
 */
static void
nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
    struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
    struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
    struct ucred *cred, int *newonep)
{
	struct nfsclowner *owp = *owpp, *nowp;
	struct nfsclopen *op, *nop;

	if (nowpp != NULL)
		nowp = *nowpp;
	else
		nowp = NULL;
	if (nopp != NULL)
		nop = *nopp;
	else
		nop = NULL;
	if (owp == NULL && nowp != NULL) {
		NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
		LIST_INIT(&nowp->nfsow_open);
		nowp->nfsow_clp = clp;
		nowp->nfsow_seqid = 0;
		nowp->nfsow_defunct = 0;
		nfscl_lockinit(&nowp->nfsow_rwlock);
		if (dp != NULL) {
			nfsstatsv1.cllocalopenowners++;
			LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
		} else {
			nfsstatsv1.clopenowners++;
			LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
		}
		owp = *owpp = nowp;
		*nowpp = NULL;
		if (newonep != NULL)
			*newonep = 1;
	}

	/* If an fhp has been specified, create an Open as well. */
	if (fhp != NULL) {
		/* and look for the correct open, based upon FH */
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, fhp, fhlen))
				break;
		}
		if (op == NULL && nop != NULL) {
			nop->nfso_own = owp;
			nop->nfso_mode = 0;
			nop->nfso_opencnt = 0;
			nop->nfso_posixlock = 1;
			nop->nfso_fhlen = fhlen;
			NFSBCOPY(fhp, nop->nfso_fh, fhlen);
			LIST_INIT(&nop->nfso_lock);
			nop->nfso_stateid.seqid = 0;
			nop->nfso_stateid.other[0] = 0;
			nop->nfso_stateid.other[1] = 0;
			nop->nfso_stateid.other[2] = 0;
			KASSERT(cred != NULL, ("%s: cred NULL\n", __func__));
			newnfs_copyincred(cred, &nop->nfso_cred);
			if (dp != NULL) {
				TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
				TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
				    nfsdl_list);
				dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
				nfsstatsv1.cllocalopens++;
			} else {
				nfsstatsv1.clopens++;
			}
			LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
			*opp = nop;
			*nopp = NULL;
			if (newonep != NULL)
				*newonep = 1;
		} else {
			*opp = op;
		}
	}
}

/*
 * Called to find/add a delegation to a client.
 */
int
nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
    int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp)
{
	struct nfscldeleg *dp = *dpp, *tdp;

	/*
	 * First, if we have received a Read delegation for a file on a
	 * read/write file system, just return it, because they aren't
	 * useful, imho.
	 */
	if (mp != NULL && dp != NULL && !NFSMNT_RDONLY(mp) &&
	    (dp->nfsdl_flags & NFSCLDL_READ)) {
		(void) nfscl_trydelegreturn(dp, cred, VFSTONFS(mp), p);
		free(dp, M_NFSCLDELEG);
		*dpp = NULL;
		return (0);
	}

	/* Look for the correct deleg, based upon FH */
	NFSLOCKCLSTATE();
	tdp = nfscl_finddeleg(clp, nfhp, fhlen);
	if (tdp == NULL) {
		if (dp == NULL) {
			NFSUNLOCKCLSTATE();
			return (NFSERR_BADSTATEID);
		}
		*dpp = NULL;
		TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
		LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
		    nfsdl_hash);
		dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
		nfsstatsv1.cldelegates++;
		nfscl_delegcnt++;
	} else {
		/*
		 * A delegation already exists. If a new one was passed in,
		 * discard it, since only one delegation is kept per file.
		 */
		if (dp != NULL) {
			printf("Deleg already exists!\n");
			free(dp, M_NFSCLDELEG);
			*dpp = NULL;
		} else {
			*dpp = tdp;
		}
	}
	NFSUNLOCKCLSTATE();
	return (0);
}

/*
 * Find a delegation for this file handle. Return NULL upon failure.
 */
static struct nfscldeleg *
nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
{
	struct nfscldeleg *dp;

	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
			break;
	}
	return (dp);
}

/*
 * Get a stateid for an I/O operation. First, look for an open and iff
 * found, return either a lockowner stateid or the open stateid.
 * If no Open is found, just return error and the special stateid of all zeros.
 */
int
nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
    int fords, struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
    void **lckpp)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;
	struct nfsclopen *op = NULL, *top;
	struct nfscllockowner *lp;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error, done;

	*lckpp = NULL;
	/*
	 * Initially, just set the special stateid of all zeros.
	 * (Don't do this for a DS, since the special stateid can't be used.)
	 */
	if (fords == 0) {
		stateidp->seqid = 0;
		stateidp->other[0] = 0;
		stateidp->other[1] = 0;
		stateidp->other[2] = 0;
	}
	if (vnode_vtype(vp) != VREG)
		return (EISDIR);
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	NFSLOCKCLSTATE();
	clp = nfscl_findcl(nmp);
	if (clp == NULL) {
		NFSUNLOCKCLSTATE();
		return (EACCES);
	}

	/*
	 * Wait for recovery to complete.
	 */
	while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
		(void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
		    PZERO, "nfsrecvr", NULL);

	/*
	 * First, look for a delegation.
	 */
	LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
		if (dp->nfsdl_fhlen == fhlen &&
		    !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
			if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
			    (dp->nfsdl_flags & NFSCLDL_WRITE)) {
				stateidp->seqid = dp->nfsdl_stateid.seqid;
				stateidp->other[0] = dp->nfsdl_stateid.other[0];
				stateidp->other[1] = dp->nfsdl_stateid.other[1];
				stateidp->other[2] = dp->nfsdl_stateid.other[2];
				if (!(np->n_flag & NDELEGRECALL)) {
					TAILQ_REMOVE(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
					    nfsdl_list);
					dp->nfsdl_timestamp = NFSD_MONOSEC +
					    120;
					dp->nfsdl_rwlock.nfslock_usecnt++;
					*lckpp = (void *)&dp->nfsdl_rwlock;
				}
				NFSUNLOCKCLSTATE();
				return (0);
			}
			break;
		}
	}

	if (p != NULL) {
		/*
		 * If p != NULL, we want to search the parentage tree
		 * for a matching OpenOwner and use that.
		 */
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
			nfscl_filllockowner(NULL, own, F_POSIX);
		else
			nfscl_filllockowner(p->td_proc, own, F_POSIX);
		lp = NULL;
		error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, own, own,
		    mode, &lp, &op);
		if (error == 0 && lp != NULL && fords == 0) {
			/* Don't return a lock stateid for a DS. */
			stateidp->seqid =
			    lp->nfsl_stateid.seqid;
			stateidp->other[0] =
			    lp->nfsl_stateid.other[0];
			stateidp->other[1] =
			    lp->nfsl_stateid.other[1];
			stateidp->other[2] =
			    lp->nfsl_stateid.other[2];
			NFSUNLOCKCLSTATE();
			return (0);
		}
	}
	if (op == NULL) {
		/* If not found, just look for any OpenOwner that will work. */
		top = NULL;
		done = 0;
		owp = LIST_FIRST(&clp->nfsc_owner);
		while (!done && owp != NULL) {
			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
				if (op->nfso_fhlen == fhlen &&
				    !NFSBCMP(op->nfso_fh, nfhp, fhlen)) {
					if (top == NULL && (op->nfso_mode &
					    NFSV4OPEN_ACCESSWRITE) != 0 &&
					    (mode & NFSV4OPEN_ACCESSREAD) != 0)
						top = op;
					if ((mode & op->nfso_mode) == mode) {
						done = 1;
						break;
					}
				}
			}
			if (!done)
				owp = LIST_NEXT(owp, nfsow_list);
		}
		if (!done) {
			NFSCL_DEBUG(2, "openmode top=%p\n", top);
			if (top == NULL || NFSHASOPENMODE(nmp)) {
				NFSUNLOCKCLSTATE();
				return (ENOENT);
			} else
				op = top;
		}
		/*
		 * For read aheads or write behinds, use the open cred.
		 * A read ahead or write behind is indicated by p == NULL.
		 */
		if (p == NULL)
			newnfs_copycred(&op->nfso_cred, cred);
	}

	/*
	 * No lock stateid, so return the open stateid.
	 */
	stateidp->seqid = op->nfso_stateid.seqid;
	stateidp->other[0] = op->nfso_stateid.other[0];
	stateidp->other[1] = op->nfso_stateid.other[1];
	stateidp->other[2] = op->nfso_stateid.other[2];
	NFSUNLOCKCLSTATE();
	return (0);
}
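
/*
 * Example (an illustrative sketch): picking a stateid for a Read, as
 * implemented above. The preference order is delegation stateid, then
 * lock stateid, then open stateid; ENOENT is returned with the special
 * all-zeros stateid when no usable open exists:
 *
 *	nfsv4stateid_t stateid;
 *	void *lckp;
 *
 *	error = nfscl_getstateid(vp, nfhp, fhlen, NFSV4OPEN_ACCESSREAD,
 *	    0, cred, p, &stateid, &lckp);
 *
 * When a delegation stateid is returned, "lckp" is set non-NULL and
 * the caller is responsible for releasing the usecnt reference that
 * was taken on the delegation's rwlock once the I/O is done.
 */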

/*
 * Search for a matching file, mode and, optionally, lockowner.
 */
static int
nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen,
    u_int8_t *openown, u_int8_t *lockown, u_int32_t mode,
    struct nfscllockowner **lpp, struct nfsclopen **opp)
{
	struct nfsclowner *owp;
	struct nfsclopen *op, *rop, *rop2;
	struct nfscllockowner *lp;
	int keep_looping;

	if (lpp != NULL)
		*lpp = NULL;
	/*
	 * rop will be set to the open to be returned. There are three
	 * variants of this, all for an open of the correct file:
	 * 1 - A match of lockown.
	 * 2 - A match of the openown, when no lockown match exists.
	 * 3 - A match for any open, if no openown or lockown match exists.
	 * Looking for #2 over #3 probably isn't necessary, but since
	 * RFC3530 is vague w.r.t. the relationship between openowners and
	 * lockowners, I think this is the safer way to go.
	 */
	rop = NULL;
	rop2 = NULL;
	keep_looping = 1;
	/* Search the client list */
	owp = LIST_FIRST(ohp);
	while (owp != NULL && keep_looping != 0) {
		/* and look for the correct open */
		op = LIST_FIRST(&owp->nfsow_open);
		while (op != NULL && keep_looping != 0) {
			if (op->nfso_fhlen == fhlen &&
			    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
			    && (op->nfso_mode & mode) == mode) {
				if (lpp != NULL) {
					/* Now look for a matching lockowner. */
					LIST_FOREACH(lp, &op->nfso_lock,
					    nfsl_list) {
						if (!NFSBCMP(lp->nfsl_owner,
						    lockown,
						    NFSV4CL_LOCKNAMELEN)) {
							*lpp = lp;
							rop = op;
							keep_looping = 0;
							break;
						}
					}
				}
				if (rop == NULL && !NFSBCMP(owp->nfsow_owner,
				    openown, NFSV4CL_LOCKNAMELEN)) {
					rop = op;
					if (lpp == NULL)
						keep_looping = 0;
				}
				if (rop2 == NULL)
					rop2 = op;
			}
			op = LIST_NEXT(op, nfso_list);
		}
		owp = LIST_NEXT(owp, nfsow_list);
	}
	if (rop == NULL)
		rop = rop2;
	if (rop == NULL)
		return (EBADF);
	*opp = rop;
	return (0);
}

/*
 * Release use of an open owner. Called when open operations are done
 * with the open owner.
 */
void
nfscl_ownerrelease(struct nfsmount *nmp, struct nfsclowner *owp,
    __unused int error, __unused int candelete, int unlocked)
{

	if (owp == NULL)
		return;
	NFSLOCKCLSTATE();
	if (unlocked == 0) {
		if (NFSHASONEOPENOWN(nmp))
			nfsv4_relref(&owp->nfsow_rwlock);
		else
			nfscl_lockunlock(&owp->nfsow_rwlock);
	}
	nfscl_clrelease(owp->nfsow_clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Release use of an open structure under an open owner.
 */
void
nfscl_openrelease(struct nfsmount *nmp, struct nfsclopen *op, int error,
    int candelete)
{
	struct nfsclclient *clp;
	struct nfsclowner *owp;

	if (op == NULL)
		return;
	NFSLOCKCLSTATE();
	owp = op->nfso_own;
	if (NFSHASONEOPENOWN(nmp))
		nfsv4_relref(&owp->nfsow_rwlock);
	else
		nfscl_lockunlock(&owp->nfsow_rwlock);
	clp = owp->nfsow_clp;
	if (error && candelete && op->nfso_opencnt == 0)
		nfscl_freeopen(op, 0);
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Called to get a clientid structure. It will optionally lock the
 * client data structures to do the SetClientId/SetClientId_confirm,
 * but will release that lock and return the clientid with a reference
 * count on it.
 * If the "cred" argument is NULL, a new clientid should not be created.
 * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
 * be done.
 * The start_renewthread argument tells nfscl_getcl() to start a renew
 * thread if this creates a new clp.
 * It always returns *clpp with a reference count on it, unless returning
 * an error.
 */
int
nfscl_getcl(struct mount *mp, struct ucred *cred, NFSPROC_T *p,
    int start_renewthread, struct nfsclclient **clpp)
{
	struct nfsclclient *clp;
	struct nfsclclient *newclp = NULL;
	struct nfsmount *nmp;
	char uuid[HOSTUUIDLEN];
	int igotlock = 0, error, trystalecnt, clidinusedelay, i;
	u_int16_t idlen = 0;

	nmp = VFSTONFS(mp);
	if (cred != NULL) {
		getcredhostuuid(cred, uuid, sizeof uuid);
		idlen = strlen(uuid);
		if (idlen > 0)
			idlen += sizeof (u_int64_t);
		else
			idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
		newclp = malloc(
		    sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
		    M_WAITOK | M_ZERO);
	}
	NFSLOCKCLSTATE();
	/*
	 * If a forced dismount is already in progress, don't
	 * allocate a new clientid and get out now. For the case where
	 * clp != NULL, this is a harmless optimization.
	 */
	if (NFSCL_FORCEDISM(mp)) {
		NFSUNLOCKCLSTATE();
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
		return (EBADF);
	}
	clp = nmp->nm_clp;
	if (clp == NULL) {
		if (newclp == NULL) {
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		clp = newclp;
		clp->nfsc_idlen = idlen;
		LIST_INIT(&clp->nfsc_owner);
		TAILQ_INIT(&clp->nfsc_deleg);
		TAILQ_INIT(&clp->nfsc_layout);
		LIST_INIT(&clp->nfsc_devinfo);
		for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_deleghash[i]);
		for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
			LIST_INIT(&clp->nfsc_layouthash[i]);
		clp->nfsc_flags = NFSCLFLAGS_INITED;
		clp->nfsc_clientidrev = 1;
		clp->nfsc_cbident = nfscl_nextcbident();
		nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
		    clp->nfsc_idlen);
		LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
		nmp->nm_clp = clp;
		clp->nfsc_nmp = nmp;
		NFSUNLOCKCLSTATE();
		if (start_renewthread != 0)
			nfscl_start_renewthread(clp);
	} else {
		NFSUNLOCKCLSTATE();
		if (newclp != NULL)
			free(newclp, M_NFSCLCLIENT);
	}
	NFSLOCKCLSTATE();
	while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock &&
	    !NFSCL_FORCEDISM(mp))
		igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
		    NFSCLSTATEMUTEXPTR, mp);
	if (igotlock == 0) {
		/*
		 * Call nfsv4_lock() with "iwantlock == 0" so that it will
		 * wait for a pending exclusive lock request. This gives the
		 * exclusive lock request priority over this shared lock
		 * request.
		 * An exclusive lock on nfsc_lock is used mainly for server
		 * crash recoveries.
		 */
		nfsv4_lock(&clp->nfsc_lock, 0, NULL, NFSCLSTATEMUTEXPTR, mp);
		nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
	}
	if (igotlock == 0 && NFSCL_FORCEDISM(mp)) {
		/*
		 * Both nfsv4_lock() and nfsv4_getref() know to check
		 * for NFSCL_FORCEDISM() and return without sleeping to
		 * wait for the exclusive lock to be released, since it
		 * might be held by nfscl_umount() and we need to get out
		 * now for that case and not wait until nfscl_umount()
		 * releases it.
		 */
		NFSUNLOCKCLSTATE();
		return (EBADF);
	}
	NFSUNLOCKCLSTATE();

	/*
	 * If it needs a clientid, do the setclientid now.
	 */
	if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
		if (!igotlock)
			panic("nfscl_clget");
		if (p == NULL || cred == NULL) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (EACCES);
		}
		/*
		 * If RFC3530 Sec. 14.2.33 is taken literally,
		 * NFSERR_CLIDINUSE will be returned persistently for the
		 * case where a new mount of the same file system is using
		 * a different principal. In practice, NFSERR_CLIDINUSE is
		 * only returned when there is outstanding unexpired state
		 * on the clientid. As such, try for twice the lease
		 * interval, if we know what that is. Otherwise, make a
		 * wild ass guess.
		 * The case of returning NFSERR_STALECLIENTID is far less
		 * likely, but might occur if there is a significant delay
		 * between doing the SetClientID and SetClientIDConfirm Ops,
		 * such that the server throws away the clientid before
		 * receiving the SetClientIDConfirm.
		 */
		if (clp->nfsc_renew > 0)
			clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
		else
			clidinusedelay = 120;
		trystalecnt = 3;
		do {
			error = nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
			if (error == NFSERR_STALECLIENTID ||
			    error == NFSERR_STALEDONTRECOVER ||
			    error == NFSERR_BADSESSION ||
			    error == NFSERR_CLIDINUSE) {
				(void) nfs_catnap(PZERO, error, "nfs_setcl");
			}
		} while (((error == NFSERR_STALECLIENTID ||
		    error == NFSERR_BADSESSION ||
		    error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
		    (error == NFSERR_CLIDINUSE && --clidinusedelay > 0));
		if (error) {
			NFSLOCKCLSTATE();
			nfsv4_unlock(&clp->nfsc_lock, 0);
			NFSUNLOCKCLSTATE();
			return (error);
		}
		clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
	}
	if (igotlock) {
		NFSLOCKCLSTATE();
		nfsv4_unlock(&clp->nfsc_lock, 1);
		NFSUNLOCKCLSTATE();
	}

	*clpp = clp;
	return (0);
}
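
/*
 * Example (a minimal sketch): the usual pattern for callers that need
 * the clientid. nfscl_getcl() returns "clp" with a shared reference,
 * which must be dropped once the state is no longer being used, e.g.
 * via nfscl_clientrelease():
 *
 *	struct nfsclclient *clp;
 *
 *	error = nfscl_getcl(vp->v_mount, cred, p, 1, &clp);
 *	if (error == 0) {
 *		(use the NFSv4 state hanging off "clp")
 *		nfscl_clientrelease(clp);
 *	}
 */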

/*
 * Get a reference to a clientid and return it, if valid.
 */
struct nfsclclient *
nfscl_findcl(struct nfsmount *nmp)
{
	struct nfsclclient *clp;

	clp = nmp->nm_clp;
	if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
		return (NULL);
	return (clp);
}

/*
 * Release the clientid structure. It may be locked or reference counted.
 */
static void
nfscl_clrelease(struct nfsclclient *clp)
{

	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
}

/*
 * External call for nfscl_clrelease.
 */
void
nfscl_clientrelease(struct nfsclclient *clp)
{

	NFSLOCKCLSTATE();
	if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
		nfsv4_unlock(&clp->nfsc_lock, 0);
	else
		nfsv4_relref(&clp->nfsc_lock);
	NFSUNLOCKCLSTATE();
}

/*
 * Called when wanting to lock a byte region.
 */
int
nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
    int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
    struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
{
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllockowner *nlp;
	struct nfscllock *nlop, *otherlop;
	struct nfscldeleg *dp = NULL, *ldp = NULL;
	struct nfscllockownerhead *lhp = NULL;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN];
	u_int8_t *openownp;
	int error = 0, ret, donelocally = 0;
	u_int32_t mode;

	/* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */
	mode = 0;
	np = VTONFS(vp);
	*lpp = NULL;
	lp = NULL;
	*newonep = 0;
	*donelocallyp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nlp = malloc(
	    sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
	otherlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = type;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first)
			error = NFSERR_INVAL;
	}

	if (!error) {
		if (recovery)
			clp = rclp;
		else
			error = nfscl_getcl(vp->v_mount, cred, p, 1, &clp);
	}
	if (error) {
		free(nlp, M_NFSCLLOCKOWNER);
		free(otherlop, M_NFSCLLOCK);
		free(nlop, M_NFSCLLOCK);
		return (error);
	}

	op = NULL;
	if (recovery) {
		ownp = rownp;
		openownp = ropenownp;
	} else {
		nfscl_filllockowner(id, own, flags);
		ownp = own;
		if (NFSHASONEOPENOWN(VFSTONFS(vp->v_mount)))
			nfscl_filllockowner(NULL, openown, F_POSIX);
		else
			nfscl_filllockowner(p->td_proc, openown, F_POSIX);
		openownp = openown;
	}
	if (!recovery) {
		NFSLOCKCLSTATE();
		/*
		 * First, search for a delegation. If one exists for this file,
		 * the lock can be done locally against it, so long as there
		 * isn't a local lock conflict.
		 */
		ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);
		/* Just sanity check for correct type of delegation */
		if (dp != NULL && ((dp->nfsdl_flags &
		    (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
		    (type == F_WRLCK &&
		    (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
			dp = NULL;
	}
	if (dp != NULL) {
		/* Now, find an open and maybe a lockowner. */
		ret = nfscl_getopen(&dp->nfsdl_owner, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op);
		if (ret)
			ret = nfscl_getopen(&clp->nfsc_owner,
			    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
			    ownp, mode, NULL, &op);
		if (!ret) {
			lhp = &dp->nfsdl_lock;
			TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
			TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
			dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
			donelocally = 1;
		} else {
			dp = NULL;
		}
	}
	if (!donelocally) {
		/*
		 * Get the related Open and maybe lockowner.
		 */
		error = nfscl_getopen(&clp->nfsc_owner,
		    np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
		    ownp, mode, &lp, &op);
		if (!error)
			lhp = &op->nfso_lock;
	}
	if (!error && !recovery)
		error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
	if (error) {
		if (!recovery) {
			nfscl_clrelease(clp);
			NFSUNLOCKCLSTATE();
		}
		free(nlp, M_NFSCLLOCKOWNER);
		free(otherlop, M_NFSCLLOCK);
		free(nlop, M_NFSCLLOCK);
		return (error);
	}

	/*
	 * Ok, see if a lockowner exists and create one, as required.
	 */
	if (lp == NULL)
		LIST_FOREACH(lp, lhp, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
				break;
		}
	if (lp == NULL) {
		NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
		if (recovery)
			NFSBCOPY(ropenownp, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		else
			NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
			    NFSV4CL_LOCKNAMELEN);
		nlp->nfsl_seqid = 0;
		nlp->nfsl_lockflags = flags;
		nlp->nfsl_inprog = NULL;
		nfscl_lockinit(&nlp->nfsl_rwlock);
		LIST_INIT(&nlp->nfsl_lock);
		if (donelocally) {
			nlp->nfsl_open = NULL;
			nfsstatsv1.cllocallockowners++;
		} else {
			nlp->nfsl_open = op;
			nfsstatsv1.cllockowners++;
		}
		LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
		lp = nlp;
		nlp = NULL;
		*newonep = 1;
	}

	/*
	 * Now, update the byte ranges for locks.
	 */
	ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
	if (!ret)
		donelocally = 1;
	if (donelocally) {
		*donelocallyp = 1;
		if (!recovery)
			nfscl_clrelease(clp);
	} else {
		/*
		 * Serialize modifications to the lock owner for multiple
		 * threads within the same process using a read/write lock.
		 */
		if (!recovery)
			nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
	}
	if (!recovery)
		NFSUNLOCKCLSTATE();

	if (nlp)
		free(nlp, M_NFSCLLOCKOWNER);
	if (nlop)
		free(nlop, M_NFSCLLOCK);
	if (otherlop)
		free(otherlop, M_NFSCLLOCK);

	*lpp = lp;
	return (0);
}
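
/*
 * Example (an illustrative sketch, error handling elided): a typical
 * byte range lock request. The Lock RPC is only needed when the range
 * could not be handled locally against a delegation:
 *
 *	struct nfscllockowner *lp;
 *	int newone, donelocally;
 *
 *	error = nfscl_getbytelock(vp, off, len, F_WRLCK, cred, p,
 *	    NULL, 0, id, flags, NULL, NULL, &lp, &newone,
 *	    &donelocally);
 *	if (error == 0 && !donelocally)
 *		(do the Lock RPC for "lp", then release it with
 *		 nfscl_lockrelease())
 */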

/*
 * Called to unlock a byte range, for LockU.
 */
int
nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
    __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
    struct nfsclclient *clp, void *id, int flags,
    struct nfscllockowner **lpp, int *dorpcp)
{
	struct nfscllockowner *lp;
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscllock *nlop, *other_lop = NULL;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int ret = 0, fnd;

	np = VTONFS(vp);
	*lpp = NULL;
	*dorpcp = 0;

	/*
	 * Might need these, so MALLOC them now, to
	 * avoid a tsleep() in MALLOC later.
	 */
	nlop = malloc(
	    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
	nlop->nfslo_type = F_UNLCK;
	nlop->nfslo_first = off;
	if (len == NFS64BITSSET) {
		nlop->nfslo_end = NFS64BITSSET;
	} else {
		nlop->nfslo_end = off + len;
		if (nlop->nfslo_end <= nlop->nfslo_first) {
			free(nlop, M_NFSCLLOCK);
			return (NFSERR_INVAL);
		}
	}
	if (callcnt == 0) {
		other_lop = malloc(
		    sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
		*other_lop = *nlop;
	}
	nfscl_filllockowner(id, own, flags);
	dp = NULL;
	NFSLOCKCLSTATE();
	if (callcnt == 0)
		dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
		    np->n_fhp->nfh_len);

	/*
	 * First, unlock any local regions on a delegation.
	 */
	if (dp != NULL) {
		/* Look for this lockowner. */
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (lp != NULL)
			/* Use other_lop, so nlop is still available */
			(void)nfscl_updatelock(lp, &other_lop, NULL, 1);
	}

	/*
	 * Now, find a matching open/lockowner that hasn't already been done,
	 * as marked by nfsl_inprog.
	 */
	lp = NULL;
	fnd = 0;
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (lp->nfsl_inprog == NULL &&
					    !NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN)) {
						fnd = 1;
						break;
					}
				}
				if (fnd)
					break;
			}
		}
		if (fnd)
			break;
	}

	if (lp != NULL) {
		ret = nfscl_updatelock(lp, &nlop, NULL, 0);
		if (ret)
			*dorpcp = 1;
		/*
		 * Serialize modifications to the lock owner for multiple
		 * threads within the same process using a read/write lock.
		 */
		lp->nfsl_inprog = p;
		nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
		*lpp = lp;
	}
	NFSUNLOCKCLSTATE();
	if (nlop)
		free(nlop, M_NFSCLLOCK);
	if (other_lop)
		free(other_lop, M_NFSCLLOCK);
	return (0);
}

/*
 * Release all lockowners marked in progress for this process and file.
 */
void
nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p,
    void *id, int flags)
{
	struct nfsclowner *owp;
	struct nfsclopen *op;
	struct nfscllockowner *lp;
	struct nfsnode *np;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];

	np = VTONFS(vp);
	nfscl_filllockowner(id, own, flags);
	NFSLOCKCLSTATE();
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (lp->nfsl_inprog == p &&
					    !NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN)) {
						lp->nfsl_inprog = NULL;
						nfscl_lockunlock(&lp->nfsl_rwlock);
					}
				}
			}
		}
	}
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
}

/*
 * Called to find out if any bytes within the byte range specified are
 * write locked by the calling process. Used to determine if flushing
 * is required before a LockU.
 * If in doubt, return 1, so the flush will occur.
 */
int
nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
    struct ucred *cred, NFSPROC_T *p, void *id, int flags)
{
	struct nfsclowner *owp;
	struct nfscllockowner *lp;
	struct nfsclopen *op;
	struct nfsclclient *clp;
	struct nfscllock *lop;
	struct nfscldeleg *dp;
	struct nfsnode *np;
	u_int64_t off, end;
	u_int8_t own[NFSV4CL_LOCKNAMELEN];
	int error = 0;

	np = VTONFS(vp);
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		off = fl->l_start;
		break;
	case SEEK_END:
		off = np->n_size + fl->l_start;
		break;
	default:
		return (1);
	}
	if (fl->l_len != 0) {
		end = off + fl->l_len;
		if (end < off)
			return (1);
	} else {
		end = NFS64BITSSET;
	}

	error = nfscl_getcl(vp->v_mount, cred, p, 1, &clp);
	if (error)
		return (1);
	nfscl_filllockowner(id, own, flags);
	NFSLOCKCLSTATE();

	/*
	 * First check the delegation locks.
	 */
	dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
	if (dp != NULL) {
		LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
			if (!NFSBCMP(lp->nfsl_owner, own,
			    NFSV4CL_LOCKNAMELEN))
				break;
		}
		if (lp != NULL) {
			LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
				if (lop->nfslo_first >= end)
					break;
				if (lop->nfslo_end <= off)
					continue;
				if (lop->nfslo_type == F_WRLCK) {
					nfscl_clrelease(clp);
					NFSUNLOCKCLSTATE();
					return (1);
				}
			}
		}
	}

	/*
	 * Now, check state against the server.
	 */
	LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
		LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
			if (op->nfso_fhlen == np->n_fhp->nfh_len &&
			    !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
				LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
					if (!NFSBCMP(lp->nfsl_owner, own,
					    NFSV4CL_LOCKNAMELEN))
						break;
				}
				if (lp != NULL) {
					LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
						if (lop->nfslo_first >= end)
							break;
						if (lop->nfslo_end <= off)
							continue;
						if (lop->nfslo_type == F_WRLCK) {
							nfscl_clrelease(clp);
							NFSUNLOCKCLSTATE();
							return (1);
						}
					}
				}
			}
		}
	}
	nfscl_clrelease(clp);
	NFSUNLOCKCLSTATE();
	return (0);
}
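
/*
 * Worked example for the range computation above: with l_whence ==
 * SEEK_END, l_start == -10 and l_len == 0 on a 100 byte file,
 * off = 100 + (-10) = 90 and end = NFS64BITSSET, so the check covers
 * offsets 90 through end-of-file.
 */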
1435
1436 /*
1437 * Release a byte range lock owner structure.
1438 */
1439 void
1440 nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
1441 {
1442 struct nfsclclient *clp;
1443
1444 if (lp == NULL)
1445 return;
1446 NFSLOCKCLSTATE();
1447 clp = lp->nfsl_open->nfso_own->nfsow_clp;
1448 if (error != 0 && candelete &&
1449 (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
1450 nfscl_freelockowner(lp, 0);
1451 else
1452 nfscl_lockunlock(&lp->nfsl_rwlock);
1453 nfscl_clrelease(clp);
1454 NFSUNLOCKCLSTATE();
1455 }
1456
1457 /*
1458 * Free up an open structure and any associated byte range lock structures.
1459 */
1460 void
1461 nfscl_freeopen(struct nfsclopen *op, int local)
1462 {
1463
1464 LIST_REMOVE(op, nfso_list);
1465 nfscl_freealllocks(&op->nfso_lock, local);
1466 free(op, M_NFSCLOPEN);
1467 if (local)
1468 nfsstatsv1.cllocalopens--;
1469 else
1470 nfsstatsv1.clopens--;
1471 }
1472
1473 /*
1474 * Free up all lock owners and associated locks.
1475 */
1476 static void
1477 nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
1478 {
1479 struct nfscllockowner *lp, *nlp;
1480
1481 LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
1482 if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1483 panic("nfscllckw");
1484 nfscl_freelockowner(lp, local);
1485 }
1486 }
1487
1488 /*
1489 * Called for an Open when NFSERR_EXPIRED is received from the server.
1490 * If there are no byte range locks nor a Share Deny lost, try to do a
1491 * fresh Open. Otherwise, free the open.
1492 */
1493 static int
1494 nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
1495 struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
1496 {
1497 struct nfscllockowner *lp;
1498 struct nfscldeleg *dp;
1499 int mustdelete = 0, error;
1500
1501 /*
1502 * Look for any byte range lock(s).
1503 */
1504 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1505 if (!LIST_EMPTY(&lp->nfsl_lock)) {
1506 mustdelete = 1;
1507 break;
1508 }
1509 }
1510
1511 /*
1512 * If no byte range lock(s) nor a Share deny, try to re-open.
1513 */
1514 if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
1515 newnfs_copycred(&op->nfso_cred, cred);
1516 dp = NULL;
1517 error = nfsrpc_reopen(nmp, op->nfso_fh,
1518 op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
1519 if (error) {
1520 mustdelete = 1;
1521 if (dp != NULL) {
1522 free(dp, M_NFSCLDELEG);
1523 dp = NULL;
1524 }
1525 }
1526 if (dp != NULL)
1527 nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
1528 op->nfso_fhlen, cred, p, &dp);
1529 }
1530
1531 /*
1532 * If a byte range lock or Share deny or couldn't re-open, free it.
1533 */
1534 if (mustdelete)
1535 nfscl_freeopen(op, 0);
1536 return (mustdelete);
1537 }
1538
1539 /*
1540 * Free up an open owner structure.
1541 */
1542 static void
1543 nfscl_freeopenowner(struct nfsclowner *owp, int local)
1544 {
1545
1546 LIST_REMOVE(owp, nfsow_list);
1547 free(owp, M_NFSCLOWNER);
1548 if (local)
1549 nfsstatsv1.cllocalopenowners--;
1550 else
1551 nfsstatsv1.clopenowners--;
1552 }
1553
1554 /*
1555 * Free up a byte range lock owner structure.
1556 */
1557 void
1558 nfscl_freelockowner(struct nfscllockowner *lp, int local)
1559 {
1560 struct nfscllock *lop, *nlop;
1561
1562 LIST_REMOVE(lp, nfsl_list);
1563 LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
1564 nfscl_freelock(lop, local);
1565 }
1566 free(lp, M_NFSCLLOCKOWNER);
1567 if (local)
1568 nfsstatsv1.cllocallockowners--;
1569 else
1570 nfsstatsv1.cllockowners--;
1571 }
1572
1573 /*
1574 * Free up a byte range lock structure.
1575 */
1576 void
1577 nfscl_freelock(struct nfscllock *lop, int local)
1578 {
1579
1580 LIST_REMOVE(lop, nfslo_list);
1581 free(lop, M_NFSCLLOCK);
1582 if (local)
1583 nfsstatsv1.cllocallocks--;
1584 else
1585 nfsstatsv1.cllocks--;
1586 }
1587
1588 /*
1589 * Clean out the state related to a delegation.
1590 */
1591 static void
1592 nfscl_cleandeleg(struct nfscldeleg *dp)
1593 {
1594 struct nfsclowner *owp, *nowp;
1595 struct nfsclopen *op;
1596
1597 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
1598 op = LIST_FIRST(&owp->nfsow_open);
1599 if (op != NULL) {
1600 if (LIST_NEXT(op, nfso_list) != NULL)
1601 panic("nfscleandel");
1602 nfscl_freeopen(op, 1);
1603 }
1604 nfscl_freeopenowner(owp, 1);
1605 }
1606 nfscl_freealllocks(&dp->nfsdl_lock, 1);
1607 }
1608
1609 /*
1610 * Free a delegation.
1611 */
1612 static void
1613 nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp)
1614 {
1615
1616 TAILQ_REMOVE(hdp, dp, nfsdl_list);
1617 LIST_REMOVE(dp, nfsdl_hash);
1618 free(dp, M_NFSCLDELEG);
1619 nfsstatsv1.cldelegates--;
1620 nfscl_delegcnt--;
1621 }
1622
1623 /*
1624 * Free up all state related to this client structure.
1625 */
1626 static void
1627 nfscl_cleanclient(struct nfsclclient *clp)
1628 {
1629 struct nfsclowner *owp, *nowp;
1630 struct nfsclopen *op, *nop;
1631 struct nfscllayout *lyp, *nlyp;
1632 struct nfscldevinfo *dip, *ndip;
1633
1634 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
1635 nfscl_freelayout(lyp);
1636
1637 LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip)
1638 nfscl_freedevinfo(dip);
1639
1640 /* Now, all the OpenOwners, etc. */
1641 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1642 LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
1643 nfscl_freeopen(op, 0);
1644 }
1645 nfscl_freeopenowner(owp, 0);
1646 }
1647 }
1648
1649 /*
1650 * Called when an NFSERR_EXPIRED is received from the server.
1651 */
1652 static void
1653 nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
1654 struct ucred *cred, NFSPROC_T *p)
1655 {
1656 struct nfsclowner *owp, *nowp, *towp;
1657 struct nfsclopen *op, *nop, *top;
1658 struct nfscldeleg *dp, *ndp;
1659 int ret, printed = 0;
1660
1661 /*
1662 * First, merge locally issued Opens into the list for the server.
1663 */
1664 dp = TAILQ_FIRST(&clp->nfsc_deleg);
1665 while (dp != NULL) {
1666 ndp = TAILQ_NEXT(dp, nfsdl_list);
1667 owp = LIST_FIRST(&dp->nfsdl_owner);
1668 while (owp != NULL) {
1669 nowp = LIST_NEXT(owp, nfsow_list);
1670 op = LIST_FIRST(&owp->nfsow_open);
1671 if (op != NULL) {
1672 if (LIST_NEXT(op, nfso_list) != NULL)
1673 panic("nfsclexp");
1674 LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
1675 if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
1676 NFSV4CL_LOCKNAMELEN))
1677 break;
1678 }
1679 if (towp != NULL) {
1680 /* Merge opens in */
1681 LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
1682 if (top->nfso_fhlen == op->nfso_fhlen &&
1683 !NFSBCMP(top->nfso_fh, op->nfso_fh,
1684 op->nfso_fhlen)) {
1685 top->nfso_mode |= op->nfso_mode;
1686 top->nfso_opencnt += op->nfso_opencnt;
1687 break;
1688 }
1689 }
1690 if (top == NULL) {
1691 /* Just add the open to the owner list */
1692 LIST_REMOVE(op, nfso_list);
1693 op->nfso_own = towp;
1694 LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
1695 nfsstatsv1.cllocalopens--;
1696 nfsstatsv1.clopens++;
1697 }
1698 } else {
1699 /* Just add the openowner to the client list */
1700 LIST_REMOVE(owp, nfsow_list);
1701 owp->nfsow_clp = clp;
1702 LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
1703 nfsstatsv1.cllocalopenowners--;
1704 nfsstatsv1.clopenowners++;
1705 nfsstatsv1.cllocalopens--;
1706 nfsstatsv1.clopens++;
1707 }
1708 }
1709 owp = nowp;
1710 }
1711 if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
1712 printed = 1;
1713 printf("nfsv4 expired locks lost\n");
1714 }
1715 nfscl_cleandeleg(dp);
1716 nfscl_freedeleg(&clp->nfsc_deleg, dp);
1717 dp = ndp;
1718 }
1719 if (!TAILQ_EMPTY(&clp->nfsc_deleg))
1720 panic("nfsclexp");
1721
1722 /*
1723 * Now, try and reopen against the server.
1724 */
1725 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1726 owp->nfsow_seqid = 0;
1727 LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
1728 ret = nfscl_expireopen(clp, op, nmp, cred, p);
1729 if (ret && !printed) {
1730 printed = 1;
1731 printf("nfsv4 expired locks lost\n");
1732 }
1733 }
1734 if (LIST_EMPTY(&owp->nfsow_open))
1735 nfscl_freeopenowner(owp, 0);
1736 }
1737 }
1738
1739 /*
1740 * This function must be called after the process represented by "own" has
1741 * exited. Must be called with CLSTATE lock held.
1742 */
1743 static void
1744 nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
1745 {
1746 struct nfsclowner *owp, *nowp;
1747 struct nfscllockowner *lp, *nlp;
1748 struct nfscldeleg *dp;
1749
1750 /* First, get rid of local locks on delegations. */
1751 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1752 LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
1753 if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
1754 if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1755 panic("nfscllckw");
1756 nfscl_freelockowner(lp, 1);
1757 }
1758 }
1759 }
1760 owp = LIST_FIRST(&clp->nfsc_owner);
1761 while (owp != NULL) {
1762 nowp = LIST_NEXT(owp, nfsow_list);
1763 if (!NFSBCMP(owp->nfsow_owner, own,
1764 NFSV4CL_LOCKNAMELEN)) {
1765 /*
1766 * If there are children that haven't closed the
1767 * file descriptors yet, the opens will still be
1768 * here. For that case, let the renew thread clear
1769 * out the OpenOwner later.
1770 */
1771 if (LIST_EMPTY(&owp->nfsow_open))
1772 nfscl_freeopenowner(owp, 0);
1773 else
1774 owp->nfsow_defunct = 1;
1775 }
1776 owp = nowp;
1777 }
1778 }
1779
1780 /*
1781 * Find open/lock owners for processes that have exited.
1782 */
1783 static void
1784 nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
1785 {
1786 struct nfsclowner *owp, *nowp;
1787 struct nfsclopen *op;
1788 struct nfscllockowner *lp, *nlp;
1789 struct nfscldeleg *dp;
1790
1791 /*
1792 * All the pidhash locks must be acquired, since they are sx locks
1793 * and must be acquired before the mutexes. The pid(s) that will
1794 * be used aren't known yet, so all the locks need to be acquired.
1795 * Fortunately, this function is only performed once/sec.
1796 */
1797 pidhash_slockall();
1798 NFSLOCKCLSTATE();
1799 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1800 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1801 LIST_FOREACH_SAFE(lp, &op->nfso_lock, nfsl_list, nlp) {
1802 if (LIST_EMPTY(&lp->nfsl_lock))
1803 nfscl_emptylockowner(lp, lhp);
1804 }
1805 }
1806 if (nfscl_procdoesntexist(owp->nfsow_owner))
1807 nfscl_cleanup_common(clp, owp->nfsow_owner);
1808 }
1809
1810 /*
1811 * For the single open_owner case, these lock owners need to be
1812 * checked to see if they still exist separately.
1813 * This is because nfscl_procdoesntexist() never returns true for
1814 * the single open_owner so that the above doesn't ever call
1815 * nfscl_cleanup_common().
1816 */
1817 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1818 LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
1819 if (nfscl_procdoesntexist(lp->nfsl_owner))
1820 nfscl_cleanup_common(clp, lp->nfsl_owner);
1821 }
1822 }
1823 NFSUNLOCKCLSTATE();
1824 pidhash_sunlockall();
1825 }
1826
1827 /*
1828 * Take the empty lock owner and move it to the local lhp list if the
1829 * associated process no longer exists.
1830 */
1831 static void
1832 nfscl_emptylockowner(struct nfscllockowner *lp,
1833 struct nfscllockownerfhhead *lhp)
1834 {
1835 struct nfscllockownerfh *lfhp, *mylfhp;
1836 struct nfscllockowner *nlp;
1837 int fnd_it;
1838
1839 /* If not a Posix lock owner, just return. */
1840 if ((lp->nfsl_lockflags & F_POSIX) == 0)
1841 return;
1842
1843 fnd_it = 0;
1844 mylfhp = NULL;
1845 /*
1846 * First, search to see if this lock owner is already in the list.
1847 * If it is, then the associated process no longer exists.
1848 */
1849 SLIST_FOREACH(lfhp, lhp, nfslfh_list) {
1850 if (lfhp->nfslfh_len == lp->nfsl_open->nfso_fhlen &&
1851 !NFSBCMP(lfhp->nfslfh_fh, lp->nfsl_open->nfso_fh,
1852 lfhp->nfslfh_len))
1853 mylfhp = lfhp;
1854 LIST_FOREACH(nlp, &lfhp->nfslfh_lock, nfsl_list)
1855 if (!NFSBCMP(nlp->nfsl_owner, lp->nfsl_owner,
1856 NFSV4CL_LOCKNAMELEN))
1857 fnd_it = 1;
1858 }
1859 /* If not found, check if process still exists. */
1860 if (fnd_it == 0 && nfscl_procdoesntexist(lp->nfsl_owner) == 0)
1861 return;
1862
1863 /* Move the lock owner over to the local list. */
1864 if (mylfhp == NULL) {
1865 mylfhp = malloc(sizeof(struct nfscllockownerfh), M_TEMP,
1866 M_NOWAIT);
1867 if (mylfhp == NULL)
1868 return;
1869 mylfhp->nfslfh_len = lp->nfsl_open->nfso_fhlen;
1870 NFSBCOPY(lp->nfsl_open->nfso_fh, mylfhp->nfslfh_fh,
1871 mylfhp->nfslfh_len);
1872 LIST_INIT(&mylfhp->nfslfh_lock);
1873 SLIST_INSERT_HEAD(lhp, mylfhp, nfslfh_list);
1874 }
1875 LIST_REMOVE(lp, nfsl_list);
1876 LIST_INSERT_HEAD(&mylfhp->nfslfh_lock, lp, nfsl_list);
1877 }
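/*
 * Illustrative sketch (not part of the build) of the list built by
 * nfscl_emptylockowner(): lock owners whose processes have gone are
 * grouped into per-file-handle buckets, so the renew thread can later
 * issue one ReleaseLockOwner per lock owner with the right FH:
 *
 *	lhp (SLIST of struct nfscllockownerfh)
 *	  -> [nfslfh_fh/nfslfh_len = FH "A"]
 *	       nfslfh_lock: lockowner1 -> lockowner2
 *	  -> [nfslfh_fh/nfslfh_len = FH "B"]
 *	       nfslfh_lock: lockowner3
 *
 * FH "A" and FH "B" are, of course, hypothetical.
 */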
1878
1879 static int fake_global; /* Used to force visibility of MNTK_UNMOUNTF */
1880 /*
1881 * Called from nfs umount to free up the clientid.
1882 */
1883 void
1884 nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p)
1885 {
1886 struct nfsclclient *clp;
1887 struct ucred *cred;
1888 int igotlock;
1889
1890 /*
1891 * For the case that matters, this is the thread that set
1892 * MNTK_UNMOUNTF, so it will see it set. The code that follows is
1893 * done to ensure that any thread executing nfscl_getcl() after
1894 * this time will see MNTK_UNMOUNTF set. nfscl_getcl() uses the
1895 * mutex for NFSLOCKCLSTATE(), so it is "m" in the following
1896 * explanation, courtesy of Alan Cox.
1897 * What follows is a snippet from Alan Cox's email at:
1898 * https://docs.FreeBSD.org/cgi/mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw
1899 *
1900 * 1. Set MNTK_UNMOUNTF
1901 * 2. Acquire a standard FreeBSD mutex "m".
1902 * 3. Update some data structures.
1903 * 4. Release mutex "m".
1904 *
1905 * Then, other threads that acquire "m" after step 4 has occurred will
1906 * see MNTK_UNMOUNTF as set. But, other threads that beat thread X (the
1907 * thread doing steps 1-4) to step 2 may or may not see it as set.
1908 */
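/*
 * A minimal sketch of that pattern as used here (illustrative only;
 * the thread names are hypothetical):
 *
 *	Thread X (this thread)		Thread Y (in nfscl_getcl())
 *	----------------------		---------------------------
 *	set MNTK_UNMOUNTF
 *	NFSLOCKCLSTATE();
 *	fake_global++;
 *	NFSUNLOCKCLSTATE();
 *					NFSLOCKCLSTATE();
 *					sees MNTK_UNMOUNTF set
 *
 * The fake_global++ gives the critical section a side effect, so the
 * lock/unlock pair below cannot be optimized away.
 */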
1909 NFSLOCKCLSTATE();
1910 if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
1911 fake_global++;
1912 NFSUNLOCKCLSTATE();
1913 NFSLOCKCLSTATE();
1914 }
1915
1916 clp = nmp->nm_clp;
1917 if (clp != NULL) {
1918 if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
1919 panic("nfscl umount");
1920
1921 /*
1922 * First, handshake with the nfscl renew thread, to terminate
1923 * it.
1924 */
1925 clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
1926 while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
1927 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT,
1928 "nfsclumnt", hz);
1929
1930 /*
1931 * Now, get the exclusive lock on the client state, so
1932 * that no uses of the state are still in progress.
1933 */
1934 do {
1935 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
1936 NFSCLSTATEMUTEXPTR, NULL);
1937 } while (!igotlock);
1938 NFSUNLOCKCLSTATE();
1939
1940 /*
1941 * Free up all the state. It will expire on the server, but
1942 * maybe we should do a SetClientId/SetClientIdConfirm so
1943 * the server throws it away?
1944 */
1945 LIST_REMOVE(clp, nfsc_list);
1946 nfscl_delegreturnall(clp, p);
1947 cred = newnfs_getcred();
1948 if (NFSHASNFSV4N(nmp)) {
1949 (void)nfsrpc_destroysession(nmp, clp, cred, p);
1950 (void)nfsrpc_destroyclient(nmp, clp, cred, p);
1951 } else
1952 (void)nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
1953 nfscl_cleanclient(clp);
1954 nmp->nm_clp = NULL;
1955 NFSFREECRED(cred);
1956 free(clp, M_NFSCLCLIENT);
1957 } else
1958 NFSUNLOCKCLSTATE();
1959 }
1960
1961 /*
1962 * This function is called when a server replies with NFSERR_STALECLIENTID,
1963 * NFSERR_STALESTATEID or NFSERR_BADSESSION. It traverses the clientid lists,
1964 * doing Opens and Locks with reclaim. If these fail, it deletes the
1965 * corresponding state.
1966 */
1967 static void
1968 nfscl_recover(struct nfsclclient *clp, bool *retokp, struct ucred *cred,
1969 NFSPROC_T *p)
1970 {
1971 struct nfsclowner *owp, *nowp;
1972 struct nfsclopen *op, *nop;
1973 struct nfscllockowner *lp, *nlp;
1974 struct nfscllock *lop, *nlop;
1975 struct nfscldeleg *dp, *ndp, *tdp;
1976 struct nfsmount *nmp;
1977 struct ucred *tcred;
1978 struct nfsclopenhead extra_open;
1979 struct nfscldeleghead extra_deleg;
1980 struct nfsreq *rep;
1981 u_int64_t len;
1982 u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode;
1983 int i, igotlock = 0, error, trycnt, firstlock;
1984 struct nfscllayout *lyp, *nlyp;
1985
1986 /*
1987 * First, lock the client structure, so everyone else will
1988 * block when trying to use state.
1989 */
1990 NFSLOCKCLSTATE();
1991 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
1992 do {
1993 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
1994 NFSCLSTATEMUTEXPTR, NULL);
1995 } while (!igotlock);
1996 NFSUNLOCKCLSTATE();
1997
1998 nmp = clp->nfsc_nmp;
1999 if (nmp == NULL)
2000 panic("nfscl recover");
2001
2002 /*
2003 * For now, just get rid of all layouts. There may be a need
2004 * to do LayoutCommit Ops with reclaim == true later.
2005 */
2006 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
2007 nfscl_freelayout(lyp);
2008 TAILQ_INIT(&clp->nfsc_layout);
2009 for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
2010 LIST_INIT(&clp->nfsc_layouthash[i]);
2011
2012 trycnt = 5;
2013 tcred = NULL;
2014 do {
2015 error = nfsrpc_setclient(nmp, clp, 1, retokp, cred, p);
2016 } while ((error == NFSERR_STALECLIENTID ||
2017 error == NFSERR_BADSESSION ||
2018 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
2019 if (error) {
2020 NFSLOCKCLSTATE();
2021 clp->nfsc_flags &= ~(NFSCLFLAGS_RECOVER |
2022 NFSCLFLAGS_RECVRINPROG);
2023 wakeup(&clp->nfsc_flags);
2024 nfsv4_unlock(&clp->nfsc_lock, 0);
2025 NFSUNLOCKCLSTATE();
2026 return;
2027 }
2028 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
2029 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2030
2031 /*
2032 * Mark requests already queued on the server, so that they don't
2033 * initiate another recovery cycle. Any requests already in the
2034 * queue that handle state information will have the old stale
2035 * clientid/stateid and will get a NFSERR_STALESTATEID,
2036 * NFSERR_STALECLIENTID or NFSERR_BADSESSION reply from the server.
2037 * This will be translated to NFSERR_STALEDONTRECOVER when
2038 * R_DONTRECOVER is set.
2039 */
2040 NFSLOCKREQ();
2041 TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) {
2042 if (rep->r_nmp == nmp)
2043 rep->r_flags |= R_DONTRECOVER;
2044 }
2045 NFSUNLOCKREQ();
2046
2047 /*
2048 * If nfsrpc_setclient() returns *retokp == true,
2049 * no more recovery is needed.
2050 */
2051 if (*retokp)
2052 goto out;
2053
2054 /*
2055 * Now, mark all delegations "need reclaim".
2056 */
2057 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
2058 dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM;
2059
2060 TAILQ_INIT(&extra_deleg);
2061 LIST_INIT(&extra_open);
2062 /*
2063 * Now traverse the state lists, doing Open and Lock Reclaims.
2064 */
2065 tcred = newnfs_getcred();
2066 owp = LIST_FIRST(&clp->nfsc_owner);
2067 while (owp != NULL) {
2068 nowp = LIST_NEXT(owp, nfsow_list);
2069 owp->nfsow_seqid = 0;
2070 op = LIST_FIRST(&owp->nfsow_open);
2071 while (op != NULL) {
2072 nop = LIST_NEXT(op, nfso_list);
2073 if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
2074 /* Search for a delegation to reclaim with the open */
2075 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2076 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
2077 continue;
2078 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
2079 mode = NFSV4OPEN_ACCESSWRITE;
2080 delegtype = NFSV4OPEN_DELEGATEWRITE;
2081 } else {
2082 mode = NFSV4OPEN_ACCESSREAD;
2083 delegtype = NFSV4OPEN_DELEGATEREAD;
2084 }
2085 if ((op->nfso_mode & mode) == mode &&
2086 op->nfso_fhlen == dp->nfsdl_fhlen &&
2087 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
2088 break;
2089 }
2090 ndp = dp;
2091 if (dp == NULL)
2092 delegtype = NFSV4OPEN_DELEGATENONE;
2093 newnfs_copycred(&op->nfso_cred, tcred);
2094 error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
2095 op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
2096 op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
2097 tcred, p);
2098 if (!error) {
2099 /* Handle any replied delegation */
2100 if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
2101 || NFSMNT_RDONLY(nmp->nm_mountp))) {
2102 if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
2103 mode = NFSV4OPEN_ACCESSWRITE;
2104 else
2105 mode = NFSV4OPEN_ACCESSREAD;
2106 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2107 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
2108 continue;
2109 if ((op->nfso_mode & mode) == mode &&
2110 op->nfso_fhlen == dp->nfsdl_fhlen &&
2111 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
2112 op->nfso_fhlen)) {
2113 dp->nfsdl_stateid = ndp->nfsdl_stateid;
2114 dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
2115 dp->nfsdl_ace = ndp->nfsdl_ace;
2116 dp->nfsdl_change = ndp->nfsdl_change;
2117 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2118 if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
2119 dp->nfsdl_flags |= NFSCLDL_RECALL;
2120 free(ndp, M_NFSCLDELEG);
2121 ndp = NULL;
2122 break;
2123 }
2124 }
2125 }
2126 if (ndp != NULL)
2127 TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);
2128
2129 /* and reclaim all byte range locks */
2130 lp = LIST_FIRST(&op->nfso_lock);
2131 while (lp != NULL) {
2132 nlp = LIST_NEXT(lp, nfsl_list);
2133 lp->nfsl_seqid = 0;
2134 firstlock = 1;
2135 lop = LIST_FIRST(&lp->nfsl_lock);
2136 while (lop != NULL) {
2137 nlop = LIST_NEXT(lop, nfslo_list);
2138 if (lop->nfslo_end == NFS64BITSSET)
2139 len = NFS64BITSSET;
2140 else
2141 len = lop->nfslo_end - lop->nfslo_first;
2142 error = nfscl_trylock(nmp, NULL,
2143 op->nfso_fh, op->nfso_fhlen, lp,
2144 firstlock, 1, lop->nfslo_first, len,
2145 lop->nfslo_type, tcred, p);
2146 if (error != 0)
2147 nfscl_freelock(lop, 0);
2148 else
2149 firstlock = 0;
2150 lop = nlop;
2151 }
2152 /* If no locks, but a lockowner, just delete it. */
2153 if (LIST_EMPTY(&lp->nfsl_lock))
2154 nfscl_freelockowner(lp, 0);
2155 lp = nlp;
2156 }
2157 }
2158 }
2159 if (error != 0 && error != NFSERR_BADSESSION)
2160 nfscl_freeopen(op, 0);
2161 op = nop;
2162 }
2163 owp = nowp;
2164 }
2165
2166 /*
2167 * Now, try and get any delegations not yet reclaimed by cobbling
2168 * to-gether an appropriate open.
2169 */
2170 nowp = NULL;
2171 dp = TAILQ_FIRST(&clp->nfsc_deleg);
2172 while (dp != NULL) {
2173 ndp = TAILQ_NEXT(dp, nfsdl_list);
2174 if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
2175 if (nowp == NULL) {
2176 nowp = malloc(
2177 sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
2178 /*
2179 * The name must be as long as the largest possible
2180 * NFSV4CL_LOCKNAMELEN (12 for now).
2181 */
2182 NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
2183 NFSV4CL_LOCKNAMELEN);
2184 LIST_INIT(&nowp->nfsow_open);
2185 nowp->nfsow_clp = clp;
2186 nowp->nfsow_seqid = 0;
2187 nowp->nfsow_defunct = 0;
2188 nfscl_lockinit(&nowp->nfsow_rwlock);
2189 }
2190 nop = NULL;
2191 if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
2192 nop = malloc(sizeof (struct nfsclopen) +
2193 dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
2194 nop->nfso_own = nowp;
2195 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
2196 nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
2197 delegtype = NFSV4OPEN_DELEGATEWRITE;
2198 } else {
2199 nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
2200 delegtype = NFSV4OPEN_DELEGATEREAD;
2201 }
2202 nop->nfso_opencnt = 0;
2203 nop->nfso_posixlock = 1;
2204 nop->nfso_fhlen = dp->nfsdl_fhlen;
2205 NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
2206 LIST_INIT(&nop->nfso_lock);
2207 nop->nfso_stateid.seqid = 0;
2208 nop->nfso_stateid.other[0] = 0;
2209 nop->nfso_stateid.other[1] = 0;
2210 nop->nfso_stateid.other[2] = 0;
2211 newnfs_copycred(&dp->nfsdl_cred, tcred);
2212 newnfs_copyincred(tcred, &nop->nfso_cred);
2213 tdp = NULL;
2214 error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
2215 nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
2216 nop->nfso_mode, nop, NULL, 0, &tdp, 1,
2217 delegtype, tcred, p);
2218 if (tdp != NULL) {
2219 if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
2220 mode = NFSV4OPEN_ACCESSWRITE;
2221 else
2222 mode = NFSV4OPEN_ACCESSREAD;
2223 if ((nop->nfso_mode & mode) == mode &&
2224 nop->nfso_fhlen == tdp->nfsdl_fhlen &&
2225 !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
2226 nop->nfso_fhlen)) {
2227 dp->nfsdl_stateid = tdp->nfsdl_stateid;
2228 dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
2229 dp->nfsdl_ace = tdp->nfsdl_ace;
2230 dp->nfsdl_change = tdp->nfsdl_change;
2231 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2232 if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
2233 dp->nfsdl_flags |= NFSCLDL_RECALL;
2234 free(tdp, M_NFSCLDELEG);
2235 } else {
2236 TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
2237 }
2238 }
2239 }
2240 if (error) {
2241 if (nop != NULL)
2242 free(nop, M_NFSCLOPEN);
2243 /*
2244 * Couldn't reclaim it, so throw the state
2245 * away. Ouch!!
2246 */
2247 nfscl_cleandeleg(dp);
2248 nfscl_freedeleg(&clp->nfsc_deleg, dp);
2249 } else {
2250 LIST_INSERT_HEAD(&extra_open, nop, nfso_list);
2251 }
2252 }
2253 dp = ndp;
2254 }
2255
2256 /*
2257 * Now, get rid of extra Opens and Delegations.
2258 */
2259 LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) {
2260 do {
2261 newnfs_copycred(&op->nfso_cred, tcred);
2262 error = nfscl_tryclose(op, tcred, nmp, p);
2263 if (error == NFSERR_GRACE)
2264 (void) nfs_catnap(PZERO, error, "nfsexcls");
2265 } while (error == NFSERR_GRACE);
2266 LIST_REMOVE(op, nfso_list);
2267 free(op, M_NFSCLOPEN);
2268 }
2269 if (nowp != NULL)
2270 free(nowp, M_NFSCLOWNER);
2271
2272 TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) {
2273 do {
2274 newnfs_copycred(&dp->nfsdl_cred, tcred);
2275 error = nfscl_trydelegreturn(dp, tcred, nmp, p);
2276 if (error == NFSERR_GRACE)
2277 (void) nfs_catnap(PZERO, error, "nfsexdlg");
2278 } while (error == NFSERR_GRACE);
2279 TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list);
2280 free(dp, M_NFSCLDELEG);
2281 }
2282
2283 /* For NFSv4.1 or later, do a RECLAIM_COMPLETE. */
2284 if (NFSHASNFSV4N(nmp))
2285 (void)nfsrpc_reclaimcomplete(nmp, cred, p);
2286
2287 out:
2288 NFSLOCKCLSTATE();
2289 clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG;
2290 wakeup(&clp->nfsc_flags);
2291 nfsv4_unlock(&clp->nfsc_lock, 0);
2292 NFSUNLOCKCLSTATE();
2293 if (tcred != NULL)
2294 NFSFREECRED(tcred);
2295 }
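/*
 * Outline of the recovery sequence implemented above (illustrative
 * summary, not part of the build):
 *
 *	nfsv4_lock(&clp->nfsc_lock)	block other users of the state
 *	nfsrpc_setclient(..., 1, ...)	new clientid/session, retried
 *	mark queued reqs R_DONTRECOVER	avoid nested recovery cycles
 *	reclaim Opens via nfscl_tryopen	with delegations when possible
 *	reclaim byte range locks	nfscl_trylock with reclaim == 1
 *	reclaim leftover delegations	using the synthetic
 *					"RECLAIMDELEG" open owner
 *	close/return the extras		nfscl_tryclose and
 *					nfscl_trydelegreturn
 *	nfsrpc_reclaimcomplete		NFSv4.1 and later only
 */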
2296
2297 /*
2298 * This function is called when a server replies with NFSERR_EXPIRED.
2299 * It deletes all state for the client and does a fresh SetClientId/confirm.
2300 * XXX Someday it should post a signal to the process(es) that hold the
2301 * state, so they know that lock state has been lost.
2302 */
2303 int
2304 nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p)
2305 {
2306 struct nfsmount *nmp;
2307 struct ucred *cred;
2308 int igotlock = 0, error, trycnt;
2309
2310 /*
2311 * If the clientid has gone away or a new SetClientid has already
2312 * been done, just return ok.
2313 */
2314 if (clp == NULL || clidrev != clp->nfsc_clientidrev)
2315 return (0);
2316
2317 /*
2318 * First, lock the client structure, so everyone else will
2319 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so
2320 * that only one thread does the work.
2321 */
2322 NFSLOCKCLSTATE();
2323 clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT;
2324 do {
2325 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
2326 NFSCLSTATEMUTEXPTR, NULL);
2327 } while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT));
2328 if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) {
2329 if (igotlock)
2330 nfsv4_unlock(&clp->nfsc_lock, 0);
2331 NFSUNLOCKCLSTATE();
2332 return (0);
2333 }
2334 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
2335 NFSUNLOCKCLSTATE();
2336
2337 nmp = clp->nfsc_nmp;
2338 if (nmp == NULL)
2339 panic("nfscl expired");
2340 cred = newnfs_getcred();
2341 trycnt = 5;
2342 do {
2343 error = nfsrpc_setclient(nmp, clp, 0, NULL, cred, p);
2344 } while ((error == NFSERR_STALECLIENTID ||
2345 error == NFSERR_BADSESSION ||
2346 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
2347 if (error) {
2348 NFSLOCKCLSTATE();
2349 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2350 } else {
2351 /*
2352 * Expire the state for the client.
2353 */
2354 nfscl_expireclient(clp, nmp, cred, p);
2355 NFSLOCKCLSTATE();
2356 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
2357 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2358 }
2359 clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG);
2360 wakeup(&clp->nfsc_flags);
2361 nfsv4_unlock(&clp->nfsc_lock, 0);
2362 NFSUNLOCKCLSTATE();
2363 NFSFREECRED(cred);
2364 return (error);
2365 }
2366
2367 /*
2368 * This function inserts a lock in the list after insert_lop.
2369 */
2370 static void
2371 nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop,
2372 struct nfscllock *insert_lop, int local)
2373 {
2374
2375 if ((struct nfscllockowner *)insert_lop == lp)
2376 LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list);
2377 else
2378 LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list);
2379 if (local)
2380 nfsstatsv1.cllocallocks++;
2381 else
2382 nfsstatsv1.cllocks++;
2383 }
2384
2385 /*
2386 * This function updates the locking for a lock owner and given file. It
2387 * maintains a list of lock ranges ordered on increasing file offset that
2388 * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style).
2389 * It always adds new_lop to the list and sometimes uses the one pointed
2390 * at by other_lopp.
2391 * Returns 1 if the locks were modified, 0 otherwise.
2392 */
2393 static int
2394 nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp,
2395 struct nfscllock **other_lopp, int local)
2396 {
2397 struct nfscllock *new_lop = *new_lopp;
2398 struct nfscllock *lop, *tlop, *ilop;
2399 struct nfscllock *other_lop;
2400 int unlock = 0, modified = 0;
2401 u_int64_t tmp;
2402
2403 /*
2404 * Work down the list until the lock is merged.
2405 */
2406 if (new_lop->nfslo_type == F_UNLCK)
2407 unlock = 1;
2408 ilop = (struct nfscllock *)lp;
2409 lop = LIST_FIRST(&lp->nfsl_lock);
2410 while (lop != NULL) {
2411 /*
2412 * Only check locks for this file that aren't before the start of
2413 * the new lock's range.
2414 */
2415 if (lop->nfslo_end >= new_lop->nfslo_first) {
2416 if (new_lop->nfslo_end < lop->nfslo_first) {
2417 /*
2418 * If the new lock ends before the start of the
2419 * current lock's range, no merge, just insert
2420 * the new lock.
2421 */
2422 break;
2423 }
2424 if (new_lop->nfslo_type == lop->nfslo_type ||
2425 (new_lop->nfslo_first <= lop->nfslo_first &&
2426 new_lop->nfslo_end >= lop->nfslo_end)) {
2427 /*
2428 * This lock can be absorbed by the new lock/unlock.
2429 * This happens when the new lock/unlock covers the
2430 * entire range of the old lock, or when the new lock
2431 * overlaps or abuts the old lock and is of the same
2432 * type.
2433 */
2434 if (new_lop->nfslo_type != lop->nfslo_type ||
2435 new_lop->nfslo_first != lop->nfslo_first ||
2436 new_lop->nfslo_end != lop->nfslo_end)
2437 modified = 1;
2438 if (lop->nfslo_first < new_lop->nfslo_first)
2439 new_lop->nfslo_first = lop->nfslo_first;
2440 if (lop->nfslo_end > new_lop->nfslo_end)
2441 new_lop->nfslo_end = lop->nfslo_end;
2442 tlop = lop;
2443 lop = LIST_NEXT(lop, nfslo_list);
2444 nfscl_freelock(tlop, local);
2445 continue;
2446 }
2447
2448 /*
2449 * All these cases are for contiguous locks that are not the
2450 * same type, so they can't be merged.
2451 */
2452 if (new_lop->nfslo_first <= lop->nfslo_first) {
2453 /*
2454 * This case is where the new lock overlaps with the
2455 * first part of the old lock. Move the start of the
2456 * old lock to just past the end of the new lock. The
2457 * new lock will be inserted in front of the old, since
2458 * ilop hasn't been updated. (We are done now.)
2459 */
2460 if (lop->nfslo_first != new_lop->nfslo_end) {
2461 lop->nfslo_first = new_lop->nfslo_end;
2462 modified = 1;
2463 }
2464 break;
2465 }
2466 if (new_lop->nfslo_end >= lop->nfslo_end) {
2467 /*
2468 * This case is where the new lock overlaps with the
2469 * end of the old lock's range. Move the old lock's
2470 * end to just before the new lock's first and insert
2471 * the new lock after the old lock.
2472 * Might not be done yet, since the new lock could
2473 * overlap further locks with higher ranges.
2474 */
2475 if (lop->nfslo_end != new_lop->nfslo_first) {
2476 lop->nfslo_end = new_lop->nfslo_first;
2477 modified = 1;
2478 }
2479 ilop = lop;
2480 lop = LIST_NEXT(lop, nfslo_list);
2481 continue;
2482 }
2483 /*
2484 * The final case is where the new lock's range is in the
2485 * middle of the current lock's and splits the current lock
2486 * up. Use *other_lopp to handle the second part of the
2487 * split old lock range. (We are done now.)
2488 * For unlock, new_lop is reused as other_lop (with tmp saving
2489 * nfslo_first), since other_lop and new_lop are the same here.
2490 * We noted the unlock case above, so we don't need
2491 * new_lop->nfslo_type any longer.
2492 */
2493 tmp = new_lop->nfslo_first;
2494 if (unlock) {
2495 other_lop = new_lop;
2496 *new_lopp = NULL;
2497 } else {
2498 other_lop = *other_lopp;
2499 *other_lopp = NULL;
2500 }
2501 other_lop->nfslo_first = new_lop->nfslo_end;
2502 other_lop->nfslo_end = lop->nfslo_end;
2503 other_lop->nfslo_type = lop->nfslo_type;
2504 lop->nfslo_end = tmp;
2505 nfscl_insertlock(lp, other_lop, lop, local);
2506 ilop = lop;
2507 modified = 1;
2508 break;
2509 }
2510 ilop = lop;
2511 lop = LIST_NEXT(lop, nfslo_list);
2512 if (lop == NULL)
2513 break;
2514 }
2515
2516 /*
2517 * Insert the new lock in the list at the appropriate place.
2518 */
2519 if (!unlock) {
2520 nfscl_insertlock(lp, new_lop, ilop, local);
2521 *new_lopp = NULL;
2522 modified = 1;
2523 }
2524 return (modified);
2525 }
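/*
 * Worked example of nfscl_updatelock() (the offsets are hypothetical
 * and ranges use the [nfslo_first, nfslo_end) convention):
 *
 * List before:	[0,100) WRITE
 *
 * Add READ [40,60):	the new lock splits the old one, with
 *	*other_lopp holding the tail:
 *	[0,40) WRITE -> [40,60) READ -> [60,100) WRITE
 *
 * Then add WRITE [20,120):	the READ entry is fully covered and the
 *	same-type WRITE entries merge into the new lock, leaving:
 *	[0,120) WRITE
 *
 * Then unlock [0,200):	every entry is absorbed and, since an unlock
 *	is never inserted, the list ends up empty.
 */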
2526
2527 /*
2528 * This function must be run as a kernel thread.
2529 * It does Renew Ops and recovery, when required.
2530 */
2531 void
2532 nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p)
2533 {
2534 struct nfsclowner *owp, *nowp;
2535 struct nfsclopen *op;
2536 struct nfscllockowner *lp, *nlp;
2537 struct nfscldeleghead dh;
2538 struct nfscldeleg *dp, *ndp;
2539 struct ucred *cred;
2540 u_int32_t clidrev;
2541 int error, cbpathdown, islept, igotlock, ret, clearok;
2542 uint32_t recover_done_time = 0;
2543 time_t mytime;
2544 static time_t prevsec = 0;
2545 struct nfscllockownerfh *lfhp, *nlfhp;
2546 struct nfscllockownerfhhead lfh;
2547 struct nfscllayout *lyp, *nlyp;
2548 struct nfscldevinfo *dip, *ndip;
2549 struct nfscllayouthead rlh;
2550 struct nfsclrecalllayout *recallp;
2551 struct nfsclds *dsp;
2552 bool retok;
2553
2554 cred = newnfs_getcred();
2555 NFSLOCKCLSTATE();
2556 clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD;
2557 NFSUNLOCKCLSTATE();
2558 for(;;) {
2559 newnfs_setroot(cred);
2560 cbpathdown = 0;
2561 if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) {
2562 /*
2563 * Only allow one full recovery within 1/2 of the lease
2564 * duration (nfsc_renew).
2565 * retok is a value/result argument. If passed in as true,
2566 * it indicates that only a CreateSession operation should
2567 * be attempted.
2568 * If it is returned true, it indicates that the
2569 * recovery only required a CreateSession.
2570 */
2571 retok = true;
2572 if (recover_done_time < NFSD_MONOSEC) {
2573 recover_done_time = NFSD_MONOSEC +
2574 clp->nfsc_renew;
2575 retok = false;
2576 }
2577 NFSCL_DEBUG(1, "Doing recovery, only "
2578 "createsession=%d\n", retok);
2579 nfscl_recover(clp, &retok, cred, p);
2580 }
2581 if (clp->nfsc_expire <= NFSD_MONOSEC &&
2582 (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) {
2583 clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew;
2584 clidrev = clp->nfsc_clientidrev;
2585 error = nfsrpc_renew(clp, NULL, cred, p);
2586 if (error == NFSERR_CBPATHDOWN)
2587 cbpathdown = 1;
2588 else if (error == NFSERR_STALECLIENTID ||
2589 error == NFSERR_BADSESSION) {
2590 NFSLOCKCLSTATE();
2591 clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
2592 NFSUNLOCKCLSTATE();
2593 } else if (error == NFSERR_EXPIRED)
2594 (void) nfscl_hasexpired(clp, clidrev, p);
2595 }
2596
2597 checkdsrenew:
2598 if (NFSHASNFSV4N(clp->nfsc_nmp)) {
2599 /* Do renews for any DS sessions. */
2600 NFSLOCKMNT(clp->nfsc_nmp);
2601 /* Skip first entry, since the MDS is handled above. */
2602 dsp = TAILQ_FIRST(&clp->nfsc_nmp->nm_sess);
2603 if (dsp != NULL)
2604 dsp = TAILQ_NEXT(dsp, nfsclds_list);
2605 while (dsp != NULL) {
2606 if (dsp->nfsclds_expire <= NFSD_MONOSEC &&
2607 dsp->nfsclds_sess.nfsess_defunct == 0) {
2608 dsp->nfsclds_expire = NFSD_MONOSEC +
2609 clp->nfsc_renew;
2610 NFSUNLOCKMNT(clp->nfsc_nmp);
2611 (void)nfsrpc_renew(clp, dsp, cred, p);
2612 goto checkdsrenew;
2613 }
2614 dsp = TAILQ_NEXT(dsp, nfsclds_list);
2615 }
2616 NFSUNLOCKMNT(clp->nfsc_nmp);
2617 }
2618
2619 TAILQ_INIT(&dh);
2620 NFSLOCKCLSTATE();
2621 if (cbpathdown)
2622 /* It's a Total Recall! */
2623 nfscl_totalrecall(clp);
2624
2625 /*
2626 * Now, handle defunct owners.
2627 */
2628 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
2629 if (LIST_EMPTY(&owp->nfsow_open)) {
2630 if (owp->nfsow_defunct != 0)
2631 nfscl_freeopenowner(owp, 0);
2632 }
2633 }
2634
2635 /*
2636 * Do the recall on any delegations. To avoid trouble, always
2637 * come back up here after having slept.
2638 */
2639 igotlock = 0;
2640 tryagain:
2641 dp = TAILQ_FIRST(&clp->nfsc_deleg);
2642 while (dp != NULL) {
2643 ndp = TAILQ_NEXT(dp, nfsdl_list);
2644 if ((dp->nfsdl_flags & NFSCLDL_RECALL)) {
2645 /*
2646 * Wait for outstanding I/O ops to be done.
2647 */
2648 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
2649 if (igotlock) {
2650 nfsv4_unlock(&clp->nfsc_lock, 0);
2651 igotlock = 0;
2652 }
2653 dp->nfsdl_rwlock.nfslock_lock |=
2654 NFSV4LOCK_WANTED;
2655 (void) nfsmsleep(&dp->nfsdl_rwlock,
2656 NFSCLSTATEMUTEXPTR, PZERO, "nfscld",
2657 NULL);
2658 goto tryagain;
2659 }
2660 while (!igotlock) {
2661 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
2662 &islept, NFSCLSTATEMUTEXPTR, NULL);
2663 if (islept)
2664 goto tryagain;
2665 }
2666 NFSUNLOCKCLSTATE();
2667 newnfs_copycred(&dp->nfsdl_cred, cred);
2668 ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp,
2669 NULL, cred, p, 1);
2670 if (!ret) {
2671 nfscl_cleandeleg(dp);
2672 TAILQ_REMOVE(&clp->nfsc_deleg, dp,
2673 nfsdl_list);
2674 LIST_REMOVE(dp, nfsdl_hash);
2675 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
2676 nfscl_delegcnt--;
2677 nfsstatsv1.cldelegates--;
2678 }
2679 NFSLOCKCLSTATE();
2680 }
2681 dp = ndp;
2682 }
2683
2684 /*
2685 * Clear out old delegations, if we are above the high water
2686 * mark. Only clear out ones with no state related to them.
2687 * The tailq list is in LRU order.
2688 */
2689 dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead);
2690 while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) {
2691 ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list);
2692 if (dp->nfsdl_rwlock.nfslock_usecnt == 0 &&
2693 dp->nfsdl_rwlock.nfslock_lock == 0 &&
2694 dp->nfsdl_timestamp < NFSD_MONOSEC &&
2695 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED |
2696 NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) {
2697 clearok = 1;
2698 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2699 op = LIST_FIRST(&owp->nfsow_open);
2700 if (op != NULL) {
2701 clearok = 0;
2702 break;
2703 }
2704 }
2705 if (clearok) {
2706 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
2707 if (!LIST_EMPTY(&lp->nfsl_lock)) {
2708 clearok = 0;
2709 break;
2710 }
2711 }
2712 }
2713 if (clearok) {
2714 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
2715 LIST_REMOVE(dp, nfsdl_hash);
2716 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
2717 nfscl_delegcnt--;
2718 nfsstatsv1.cldelegates--;
2719 }
2720 }
2721 dp = ndp;
2722 }
2723 if (igotlock)
2724 nfsv4_unlock(&clp->nfsc_lock, 0);
2725
2726 /*
2727 * Do the recall on any layouts. To avoid trouble, always
2728 * come back up here after having slept.
2729 */
2730 TAILQ_INIT(&rlh);
2731 tryagain2:
2732 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) {
2733 if ((lyp->nfsly_flags & NFSLY_RECALL) != 0) {
2734 /*
2735 * Wait for outstanding I/O ops to be done.
2736 */
2737 if (lyp->nfsly_lock.nfslock_usecnt > 0 ||
2738 (lyp->nfsly_lock.nfslock_lock &
2739 NFSV4LOCK_LOCK) != 0) {
2740 lyp->nfsly_lock.nfslock_lock |=
2741 NFSV4LOCK_WANTED;
2742 nfsmsleep(&lyp->nfsly_lock.nfslock_lock,
2743 NFSCLSTATEMUTEXPTR, PZERO, "nfslyp",
2744 NULL);
2745 goto tryagain2;
2746 }
2747 /* Move the layout to the recall list. */
2748 TAILQ_REMOVE(&clp->nfsc_layout, lyp,
2749 nfsly_list);
2750 LIST_REMOVE(lyp, nfsly_hash);
2751 TAILQ_INSERT_HEAD(&rlh, lyp, nfsly_list);
2752
2753 /* Handle any layout commits. */
2754 if (!NFSHASNOLAYOUTCOMMIT(clp->nfsc_nmp) &&
2755 (lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
2756 lyp->nfsly_flags &= ~NFSLY_WRITTEN;
2757 NFSUNLOCKCLSTATE();
2758 NFSCL_DEBUG(3, "do layoutcommit\n");
2759 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp,
2760 cred, p);
2761 NFSLOCKCLSTATE();
2762 goto tryagain2;
2763 }
2764 }
2765 }
2766
2767 /* Now, look for stale layouts. */
2768 lyp = TAILQ_LAST(&clp->nfsc_layout, nfscllayouthead);
2769 while (lyp != NULL) {
2770 nlyp = TAILQ_PREV(lyp, nfscllayouthead, nfsly_list);
2771 if (lyp->nfsly_timestamp < NFSD_MONOSEC &&
2772 (lyp->nfsly_flags & NFSLY_RECALL) == 0 &&
2773 lyp->nfsly_lock.nfslock_usecnt == 0 &&
2774 lyp->nfsly_lock.nfslock_lock == 0) {
2775 NFSCL_DEBUG(4, "ret stale lay=%d\n",
2776 nfscl_layoutcnt);
2777 recallp = malloc(sizeof(*recallp),
2778 M_NFSLAYRECALL, M_NOWAIT);
2779 if (recallp == NULL)
2780 break;
2781 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE,
2782 lyp, NFSLAYOUTIOMODE_ANY, 0, UINT64_MAX,
2783 lyp->nfsly_stateid.seqid, 0, 0, NULL,
2784 recallp);
2785 }
2786 lyp = nlyp;
2787 }
2788
2789 /*
2790 * Free up any unreferenced device info structures.
2791 */
2792 LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) {
2793 if (dip->nfsdi_layoutrefs == 0 &&
2794 dip->nfsdi_refcnt == 0) {
2795 NFSCL_DEBUG(4, "freeing devinfo\n");
2796 LIST_REMOVE(dip, nfsdi_list);
2797 nfscl_freedevinfo(dip);
2798 }
2799 }
2800 NFSUNLOCKCLSTATE();
2801
2802 /* Do layout return(s), as required. */
2803 TAILQ_FOREACH_SAFE(lyp, &rlh, nfsly_list, nlyp) {
2804 TAILQ_REMOVE(&rlh, lyp, nfsly_list);
2805 NFSCL_DEBUG(4, "ret layout\n");
2806 nfscl_layoutreturn(clp->nfsc_nmp, lyp, cred, p);
2807 nfscl_freelayout(lyp);
2808 }
2809
2810 /*
2811 * Delegreturn any delegations cleaned out or recalled.
2812 */
2813 TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) {
2814 newnfs_copycred(&dp->nfsdl_cred, cred);
2815 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
2816 TAILQ_REMOVE(&dh, dp, nfsdl_list);
2817 free(dp, M_NFSCLDELEG);
2818 }
2819
2820 SLIST_INIT(&lfh);
2821 /*
2822 * Call nfscl_cleanupkext() once per second to check for
2823 * open/lock owners where the process has exited.
2824 */
2825 mytime = NFSD_MONOSEC;
2826 if (prevsec != mytime) {
2827 prevsec = mytime;
2828 nfscl_cleanupkext(clp, &lfh);
2829 }
2830
2831 /*
2832 * Do a ReleaseLockOwner for all lock owners where the
2833 * associated process no longer exists, as found by
2834 * nfscl_cleanupkext().
2835 */
2836 newnfs_setroot(cred);
2837 SLIST_FOREACH_SAFE(lfhp, &lfh, nfslfh_list, nlfhp) {
2838 LIST_FOREACH_SAFE(lp, &lfhp->nfslfh_lock, nfsl_list,
2839 nlp) {
2840 (void)nfsrpc_rellockown(clp->nfsc_nmp, lp,
2841 lfhp->nfslfh_fh, lfhp->nfslfh_len, cred,
2842 p);
2843 nfscl_freelockowner(lp, 0);
2844 }
2845 free(lfhp, M_TEMP);
2846 }
2847 SLIST_INIT(&lfh);
2848
2849 NFSLOCKCLSTATE();
2850 if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0)
2851 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl",
2852 hz);
2853 if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) {
2854 clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD;
2855 NFSUNLOCKCLSTATE();
2856 NFSFREECRED(cred);
2857 wakeup((caddr_t)clp);
2858 return;
2859 }
2860 NFSUNLOCKCLSTATE();
2861 }
2862 }
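/*
 * One iteration of the renew thread loop above, in outline
 * (illustrative summary):
 *
 *	nfscl_recover()		if NFSCLFLAGS_RECOVER is set
 *	Renew RPC		when nfsc_expire passes, for the MDS
 *				and then for any DS sessions
 *	free defunct owners	and recall/return delegations, layouts
 *				and unreferenced device info
 *	nfscl_cleanupkext()	once per second, for exited processes
 *	ReleaseLockOwner RPCs	for the lock owners collected above
 *	mtx_sleep(..., hz)	then loop, or exit on NFSCLFLAGS_UMOUNT
 */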
2863
2864 /*
2865 * Initiate state recovery. Called when NFSERR_STALECLIENTID,
2866 * NFSERR_STALESTATEID or NFSERR_BADSESSION is received.
2867 */
2868 void
2869 nfscl_initiate_recovery(struct nfsclclient *clp)
2870 {
2871
2872 if (clp == NULL)
2873 return;
2874 NFSLOCKCLSTATE();
2875 clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
2876 NFSUNLOCKCLSTATE();
2877 wakeup((caddr_t)clp);
2878 }
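/*
 * This pairs with the renew thread above: the wakeup() pops it out of
 * its mtx_sleep() and NFSCLFLAGS_RECOVER makes the next loop iteration
 * call nfscl_recover(). A caller might use it roughly as follows
 * (a sketch only; real callers also arrange to retry the failed RPC):
 *
 *	if (error == NFSERR_STALECLIENTID ||
 *	    error == NFSERR_STALESTATEID || error == NFSERR_BADSESSION)
 *		nfscl_initiate_recovery(clp);
 */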
2879
2880 /*
2881 * Dump out the state stuff for debugging.
2882 */
2883 void
2884 nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens,
2885 int lockowner, int locks)
2886 {
2887 struct nfsclclient *clp;
2888 struct nfsclowner *owp;
2889 struct nfsclopen *op;
2890 struct nfscllockowner *lp;
2891 struct nfscllock *lop;
2892 struct nfscldeleg *dp;
2893
2894 clp = nmp->nm_clp;
2895 if (clp == NULL) {
2896 printf("nfscl dumpstate NULL clp\n");
2897 return;
2898 }
2899 NFSLOCKCLSTATE();
2900 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2901 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2902 if (openowner && !LIST_EMPTY(&owp->nfsow_open))
2903 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
2904 owp->nfsow_owner[0], owp->nfsow_owner[1],
2905 owp->nfsow_owner[2], owp->nfsow_owner[3],
2906 owp->nfsow_seqid);
2907 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2908 if (opens)
2909 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
2910 op->nfso_stateid.other[0], op->nfso_stateid.other[1],
2911 op->nfso_stateid.other[2], op->nfso_opencnt,
2912 op->nfso_fh[12]);
2913 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
2914 if (lockowner)
2915 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
2916 lp->nfsl_owner[0], lp->nfsl_owner[1],
2917 lp->nfsl_owner[2], lp->nfsl_owner[3],
2918 lp->nfsl_seqid,
2919 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
2920 lp->nfsl_stateid.other[2]);
2921 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
2922 if (locks)
2923 #ifdef __FreeBSD__
2924 printf("lck typ=%d fst=%ju end=%ju\n",
2925 lop->nfslo_type, (intmax_t)lop->nfslo_first,
2926 (intmax_t)lop->nfslo_end);
2927 #else
2928 printf("lck typ=%d fst=%qd end=%qd\n",
2929 lop->nfslo_type, lop->nfslo_first,
2930 lop->nfslo_end);
2931 #endif
2932 }
2933 }
2934 }
2935 }
2936 }
2937 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2938 if (openowner && !LIST_EMPTY(&owp->nfsow_open))
2939 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
2940 owp->nfsow_owner[0], owp->nfsow_owner[1],
2941 owp->nfsow_owner[2], owp->nfsow_owner[3],
2942 owp->nfsow_seqid);
2943 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2944 if (opens)
2945 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
2946 op->nfso_stateid.other[0], op->nfso_stateid.other[1],
2947 op->nfso_stateid.other[2], op->nfso_opencnt,
2948 op->nfso_fh[12]);
2949 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
2950 if (lockowner)
2951 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
2952 lp->nfsl_owner[0], lp->nfsl_owner[1],
2953 lp->nfsl_owner[2], lp->nfsl_owner[3],
2954 lp->nfsl_seqid,
2955 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
2956 lp->nfsl_stateid.other[2]);
2957 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
2958 if (locks)
2959 #ifdef __FreeBSD__
2960 printf("lck typ=%d fst=%ju end=%ju\n",
2961 lop->nfslo_type, (intmax_t)lop->nfslo_first,
2962 (intmax_t)lop->nfslo_end);
2963 #else
2964 printf("lck typ=%d fst=%qd end=%qd\n",
2965 lop->nfslo_type, lop->nfslo_first,
2966 lop->nfslo_end);
2967 #endif
2968 }
2969 }
2970 }
2971 }
2972 NFSUNLOCKCLSTATE();
2973 }
2974
2975 /*
2976 * Check for duplicate open owners and opens.
2977 * (Only used as a diagnostic aid.)
2978 */
2979 void
2980 nfscl_dupopen(vnode_t vp, int dupopens)
2981 {
2982 struct nfsclclient *clp;
2983 struct nfsclowner *owp, *owp2;
2984 struct nfsclopen *op, *op2;
2985 struct nfsfh *nfhp;
2986
2987 clp = VFSTONFS(vp->v_mount)->nm_clp;
2988 if (clp == NULL) {
2989 printf("nfscl dupopen NULL clp\n");
2990 return;
2991 }
2992 nfhp = VTONFS(vp)->n_fhp;
2993 NFSLOCKCLSTATE();
2994
2995 /*
2996 * First, search for duplicate owners.
2997 * These should never happen!
2998 */
2999 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
3000 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3001 if (owp != owp2 &&
3002 !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner,
3003 NFSV4CL_LOCKNAMELEN)) {
3004 NFSUNLOCKCLSTATE();
3005 printf("DUP OWNER\n");
3006 nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0, 0);
3007 return;
3008 }
3009 }
3010 }
3011
3012 /*
3013 * Now, search for duplicate stateids.
3014 * These shouldn't happen, either.
3015 */
3016 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
3017 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
3018 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3019 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3020 if (op != op2 &&
3021 (op->nfso_stateid.other[0] != 0 ||
3022 op->nfso_stateid.other[1] != 0 ||
3023 op->nfso_stateid.other[2] != 0) &&
3024 op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] &&
3025 op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] &&
3026 op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) {
3027 NFSUNLOCKCLSTATE();
3028 printf("DUP STATEID\n");
3029 nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0, 0);
3030 return;
3031 }
3032 }
3033 }
3034 }
3035 }
3036
3037 /*
3038 * Now search for duplicate opens.
3039 * Duplicate opens for the same owner
3040 * should never occur. Other duplicates are
3041 * possible and are checked for if "dupopens"
3042 * is true.
3043 */
3044 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
3045 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
3046 if (nfhp->nfh_len == op2->nfso_fhlen &&
3047 !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) {
3048 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3049 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3050 if (op != op2 && nfhp->nfh_len == op->nfso_fhlen &&
3051 !NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) &&
3052 (!NFSBCMP(op->nfso_own->nfsow_owner,
3053 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) ||
3054 dupopens)) {
3055 if (!NFSBCMP(op->nfso_own->nfsow_owner,
3056 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
3057 NFSUNLOCKCLSTATE();
3058 printf("BADDUP OPEN\n");
3059 } else {
3060 NFSUNLOCKCLSTATE();
3061 printf("DUP OPEN\n");
3062 }
3063 nfscl_dumpstate(VFSTONFS(vp->v_mount), 1, 1, 0,
3064 0);
3065 return;
3066 }
3067 }
3068 }
3069 }
3070 }
3071 }
3072 NFSUNLOCKCLSTATE();
3073 }
3074
3075 /*
3076 * During close, find an open that needs to be dereferenced and
3077 * dereference it. If there are no more opens for this file,
3078 * log a message to that effect.
3079 * Opens aren't actually Close'd until VOP_INACTIVE() is performed
3080 * on the file's vnode.
3081 * This is the safe way, since it is difficult to identify
3082 * which open the close is for and I/O can be performed after the
3083 * close(2) system call when a file is mmap'd.
3084 * If it returns 0 for success, there will be a referenced
3085 * clp returned via clpp.
3086 */
3087 int
3088 nfscl_getclose(vnode_t vp, struct nfsclclient **clpp)
3089 {
3090 struct nfsclclient *clp;
3091 struct nfsclowner *owp;
3092 struct nfsclopen *op;
3093 struct nfscldeleg *dp;
3094 struct nfsfh *nfhp;
3095 int error, notdecr;
3096
3097 error = nfscl_getcl(vp->v_mount, NULL, NULL, 1, &clp);
3098 if (error)
3099 return (error);
3100 *clpp = clp;
3101
3102 nfhp = VTONFS(vp)->n_fhp;
3103 notdecr = 1;
3104 NFSLOCKCLSTATE();
3105 /*
3106 * First, look for one under a delegation that was locally issued
3107 * and just decrement the opencnt for it. Since all my Opens against
3108 * the server are DENY_NONE, I don't see a problem with hanging
3109 * onto them. (It is much easier to use one of the extant Opens
3110 * that I already have on the server when a Delegation is recalled
3111 * than to do fresh Opens.) Someday, I might need to rethink this.
3112 */
3113 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
3114 if (dp != NULL) {
3115 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3116 op = LIST_FIRST(&owp->nfsow_open);
3117 if (op != NULL) {
3118 /*
3119 * Since a delegation is for a file, there
3120 * should never be more than one open for
3121 * each openowner.
3122 */
3123 if (LIST_NEXT(op, nfso_list) != NULL)
3124 panic("nfscdeleg opens");
3125 if (notdecr && op->nfso_opencnt > 0) {
3126 notdecr = 0;
3127 op->nfso_opencnt--;
3128 break;
3129 }
3130 }
3131 }
3132 }
3133
3134 /* Now process the opens against the server. */
3135 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3136 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3137 if (op->nfso_fhlen == nfhp->nfh_len &&
3138 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
3139 nfhp->nfh_len)) {
3140 /* Found an open, decrement cnt if possible */
3141 if (notdecr && op->nfso_opencnt > 0) {
3142 notdecr = 0;
3143 op->nfso_opencnt--;
3144 }
3145 /*
3146 * There are more opens, so just return.
3147 */
3148 if (op->nfso_opencnt > 0) {
3149 NFSUNLOCKCLSTATE();
3150 return (0);
3151 }
3152 }
3153 }
3154 }
3155 NFSUNLOCKCLSTATE();
3156 if (notdecr)
3157 printf("nfscl: never fnd open\n");
3158 return (0);
3159 }
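/*
 * Illustrative example of the deferred close (hypothetical sequence):
 *
 *	open(2)			-> nfso_opencnt == 1
 *	open(2)			-> nfso_opencnt == 2
 *	close(2)		-> nfscl_getclose(), nfso_opencnt == 1
 *	close(2)		-> nfscl_getclose(), nfso_opencnt == 0
 *	VOP_INACTIVE()		-> nfscl_doclose() does the NFSv4
 *				   Close operation(s)
 *
 * I/O through an mmap'd region after close(2) remains safe, since the
 * NFSv4 Open is held until VOP_INACTIVE().
 */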
3160
3161 int
3162 nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p)
3163 {
3164 struct nfsclclient *clp;
3165 struct nfsclowner *owp, *nowp;
3166 struct nfsclopen *op;
3167 struct nfscldeleg *dp;
3168 struct nfsfh *nfhp;
3169 struct nfsclrecalllayout *recallp;
3170 int error;
3171
3172 error = nfscl_getcl(vp->v_mount, NULL, NULL, 1, &clp);
3173 if (error)
3174 return (error);
3175 *clpp = clp;
3176
3177 nfhp = VTONFS(vp)->n_fhp;
3178 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
3179 NFSLOCKCLSTATE();
3180 /*
3181 * First get rid of the local Open structures, which should no
3182 * longer be in use.
3183 */
3184 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
3185 if (dp != NULL) {
3186 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
3187 op = LIST_FIRST(&owp->nfsow_open);
3188 if (op != NULL) {
3189 KASSERT((op->nfso_opencnt == 0),
3190 ("nfscl: bad open cnt on deleg"));
3191 nfscl_freeopen(op, 1);
3192 }
3193 nfscl_freeopenowner(owp, 1);
3194 }
3195 }
3196
3197 /* Return any layouts marked return on close. */
3198 nfscl_retoncloselayout(vp, clp, nfhp->nfh_fh, nfhp->nfh_len, &recallp);
3199
3200 /* Now process the opens against the server. */
3201 lookformore:
3202 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3203 op = LIST_FIRST(&owp->nfsow_open);
3204 while (op != NULL) {
3205 if (op->nfso_fhlen == nfhp->nfh_len &&
3206 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
3207 nfhp->nfh_len)) {
3208 /* Found an open, close it. */
3209 #ifdef DIAGNOSTIC
3210 KASSERT((op->nfso_opencnt == 0),
3211 ("nfscl: bad open cnt on server (%d)",
3212 op->nfso_opencnt));
3213 #endif
3214 NFSUNLOCKCLSTATE();
3215 nfsrpc_doclose(VFSTONFS(vp->v_mount), op, p);
3216 NFSLOCKCLSTATE();
3217 goto lookformore;
3218 }
3219 op = LIST_NEXT(op, nfso_list);
3220 }
3221 }
3222 NFSUNLOCKCLSTATE();
3223 /*
3224 * recallp has been set NULL by nfscl_retoncloselayout() if it was
3225 * used by the function, but calling free() with a NULL pointer is ok.
3226 */
3227 free(recallp, M_NFSLAYRECALL);
3228 return (0);
3229 }
3230
3231 /*
3232 * Return all delegations on this client.
3233 * (Must be called with client sleep lock.)
3234 */
3235 static void
3236 nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p)
3237 {
3238 struct nfscldeleg *dp, *ndp;
3239 struct ucred *cred;
3240
3241 cred = newnfs_getcred();
3242 TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) {
3243 nfscl_cleandeleg(dp);
3244 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
3245 nfscl_freedeleg(&clp->nfsc_deleg, dp);
3246 }
3247 NFSFREECRED(cred);
3248 }
3249
3250 /*
3251 * Do a callback RPC.
3252 */
3253 void
3254 nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p)
3255 {
3256 int clist, gotseq_ok, i, j, k, op, rcalls;
3257 u_int32_t *tl;
3258 struct nfsclclient *clp;
3259 struct nfscldeleg *dp = NULL;
3260 int numops, taglen = -1, error = 0, trunc __unused;
3261 u_int32_t minorvers = 0, retops = 0, *retopsp = NULL, *repp, cbident;
3262 u_char tag[NFSV4_SMALLSTR + 1], *tagstr;
3263 vnode_t vp = NULL;
3264 struct nfsnode *np;
3265 struct vattr va;
3266 struct nfsfh *nfhp;
3267 mount_t mp;
3268 nfsattrbit_t attrbits, rattrbits;
3269 nfsv4stateid_t stateid;
3270 uint32_t seqid, slotid = 0, highslot, cachethis __unused;
3271 uint8_t sessionid[NFSX_V4SESSIONID];
3272 struct mbuf *rep;
3273 struct nfscllayout *lyp;
3274 uint64_t filesid[2], len, off;
3275 int changed, gotone, laytype, recalltype;
3276 uint32_t iomode;
3277 struct nfsclrecalllayout *recallp = NULL;
3278 struct nfsclsession *tsep;
3279
3280 gotseq_ok = 0;
3281 nfsrvd_rephead(nd);
3282 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
3283 taglen = fxdr_unsigned(int, *tl);
3284 if (taglen < 0) {
3285 error = EBADRPC;
3286 goto nfsmout;
3287 }
3288 if (taglen <= NFSV4_SMALLSTR)
3289 tagstr = tag;
3290 else
3291 tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK);
3292 error = nfsrv_mtostr(nd, tagstr, taglen);
3293 if (error) {
3294 if (taglen > NFSV4_SMALLSTR)
3295 free(tagstr, M_TEMP);
3296 taglen = -1;
3297 goto nfsmout;
3298 }
3299 (void) nfsm_strtom(nd, tag, taglen);
3300 if (taglen > NFSV4_SMALLSTR) {
3301 free(tagstr, M_TEMP);
3302 }
3303 NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED);
3304 NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
3305 minorvers = fxdr_unsigned(u_int32_t, *tl++);
3306 if (minorvers != NFSV4_MINORVERSION &&
3307 minorvers != NFSV41_MINORVERSION &&
3308 minorvers != NFSV42_MINORVERSION)
3309 nd->nd_repstat = NFSERR_MINORVERMISMATCH;
3310 cbident = fxdr_unsigned(u_int32_t, *tl++);
3311 if (nd->nd_repstat)
3312 numops = 0;
3313 else
3314 numops = fxdr_unsigned(int, *tl);
3315 /*
3316 * Loop around doing the sub ops.
3317 */
3318 for (i = 0; i < numops; i++) {
3319 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
3320 NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED);
3321 *repp++ = *tl;
3322 op = fxdr_unsigned(int, *tl);
3323 if (op < NFSV4OP_CBGETATTR ||
3324 (op > NFSV4OP_CBRECALL && minorvers == NFSV4_MINORVERSION) ||
3325 (op > NFSV4OP_CBNOTIFYDEVID &&
3326 minorvers == NFSV41_MINORVERSION) ||
3327 (op > NFSV4OP_CBOFFLOAD &&
3328 minorvers == NFSV42_MINORVERSION)) {
3329 nd->nd_repstat = NFSERR_OPILLEGAL;
3330 *repp = nfscl_errmap(nd, minorvers);
3331 retops++;
3332 break;
3333 }
3334 nd->nd_procnum = op;
3335 if (op < NFSV42_CBNOPS)
3336 nfsstatsv1.cbrpccnt[nd->nd_procnum]++;
3337 switch (op) {
3338 case NFSV4OP_CBGETATTR:
3339 NFSCL_DEBUG(4, "cbgetattr\n");
3340 mp = NULL;
3341 vp = NULL;
3342 error = nfsm_getfh(nd, &nfhp);
3343 if (!error)
3344 error = nfsrv_getattrbits(nd, &attrbits,
3345 NULL, NULL);
3346 if (error == 0 && i == 0 &&
3347 minorvers != NFSV4_MINORVERSION)
3348 error = NFSERR_OPNOTINSESS;
3349 if (!error) {
3350 mp = nfscl_getmnt(minorvers, sessionid, cbident,
3351 &clp);
3352 if (mp == NULL)
3353 error = NFSERR_SERVERFAULT;
3354 }
3355 if (!error) {
3356 error = nfscl_ngetreopen(mp, nfhp->nfh_fh,
3357 nfhp->nfh_len, p, &np);
3358 if (!error)
3359 vp = NFSTOV(np);
3360 }
3361 if (!error) {
3362 NFSZERO_ATTRBIT(&rattrbits);
3363 NFSLOCKCLSTATE();
3364 dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3365 nfhp->nfh_len);
3366 if (dp != NULL) {
3367 if (NFSISSET_ATTRBIT(&attrbits,
3368 NFSATTRBIT_SIZE)) {
3369 if (vp != NULL)
3370 va.va_size = np->n_size;
3371 else
3372 va.va_size =
3373 dp->nfsdl_size;
3374 NFSSETBIT_ATTRBIT(&rattrbits,
3375 NFSATTRBIT_SIZE);
3376 }
3377 if (NFSISSET_ATTRBIT(&attrbits,
3378 NFSATTRBIT_CHANGE)) {
3379 va.va_filerev =
3380 dp->nfsdl_change;
3381 if (vp == NULL ||
3382 (np->n_flag & NDELEGMOD))
3383 va.va_filerev++;
3384 NFSSETBIT_ATTRBIT(&rattrbits,
3385 NFSATTRBIT_CHANGE);
3386 }
3387 } else
3388 error = NFSERR_SERVERFAULT;
3389 NFSUNLOCKCLSTATE();
3390 }
3391 if (vp != NULL)
3392 vrele(vp);
3393 if (mp != NULL)
3394 vfs_unbusy(mp);
3395 if (nfhp != NULL)
3396 free(nfhp, M_NFSFH);
3397 if (!error)
3398 (void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va,
3399 NULL, 0, &rattrbits, NULL, p, 0, 0, 0, 0,
3400 (uint64_t)0, NULL);
3401 break;
3402 case NFSV4OP_CBRECALL:
3403 NFSCL_DEBUG(4, "cbrecall\n");
3404 NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
3405 NFSX_UNSIGNED);
3406 stateid.seqid = *tl++;
3407 NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other,
3408 NFSX_STATEIDOTHER);
3409 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED);
3410 trunc = fxdr_unsigned(int, *tl);
3411 error = nfsm_getfh(nd, &nfhp);
3412 if (error == 0 && i == 0 &&
3413 minorvers != NFSV4_MINORVERSION)
3414 error = NFSERR_OPNOTINSESS;
3415 if (!error) {
3416 NFSLOCKCLSTATE();
3417 if (minorvers == NFSV4_MINORVERSION)
3418 clp = nfscl_getclnt(cbident);
3419 else
3420 clp = nfscl_getclntsess(sessionid);
3421 if (clp != NULL) {
3422 dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3423 nfhp->nfh_len);
3424 if (dp != NULL && (dp->nfsdl_flags &
3425 NFSCLDL_DELEGRET) == 0) {
3426 dp->nfsdl_flags |=
3427 NFSCLDL_RECALL;
3428 wakeup((caddr_t)clp);
3429 }
3430 } else {
3431 error = NFSERR_SERVERFAULT;
3432 }
3433 NFSUNLOCKCLSTATE();
3434 }
3435 if (nfhp != NULL)
3436 free(nfhp, M_NFSFH);
3437 break;
3438 case NFSV4OP_CBLAYOUTRECALL:
3439 NFSCL_DEBUG(4, "cblayrec\n");
3440 nfhp = NULL;
3441 NFSM_DISSECT(tl, uint32_t *, 4 * NFSX_UNSIGNED);
3442 laytype = fxdr_unsigned(int, *tl++);
3443 iomode = fxdr_unsigned(uint32_t, *tl++);
3444 if (newnfs_true == *tl++)
3445 changed = 1;
3446 else
3447 changed = 0;
3448 recalltype = fxdr_unsigned(int, *tl);
3449 NFSCL_DEBUG(4, "layt=%d iom=%d ch=%d rectyp=%d\n",
3450 laytype, iomode, changed, recalltype);
3451 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL,
3452 M_WAITOK);
3453 if (laytype != NFSLAYOUT_NFSV4_1_FILES &&
3454 laytype != NFSLAYOUT_FLEXFILE)
3455 error = NFSERR_NOMATCHLAYOUT;
3456 else if (recalltype == NFSLAYOUTRETURN_FILE) {
3457 error = nfsm_getfh(nd, &nfhp);
3458 NFSCL_DEBUG(4, "retfile getfh=%d\n", error);
3459 if (error != 0)
3460 goto nfsmout;
3461 NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_HYPER +
3462 NFSX_STATEID);
3463 off = fxdr_hyper(tl); tl += 2;
3464 len = fxdr_hyper(tl); tl += 2;
3465 stateid.seqid = fxdr_unsigned(uint32_t, *tl++);
3466 NFSBCOPY(tl, stateid.other, NFSX_STATEIDOTHER);
3467 if (minorvers == NFSV4_MINORVERSION)
3468 error = NFSERR_NOTSUPP;
3469 else if (i == 0)
3470 error = NFSERR_OPNOTINSESS;
3471 NFSCL_DEBUG(4, "off=%ju len=%ju sq=%u err=%d\n",
3472 (uintmax_t)off, (uintmax_t)len,
3473 stateid.seqid, error);
3474 if (error == 0) {
3475 NFSLOCKCLSTATE();
3476 clp = nfscl_getclntsess(sessionid);
3477 NFSCL_DEBUG(4, "cbly clp=%p\n", clp);
3478 if (clp != NULL) {
3479 lyp = nfscl_findlayout(clp,
3480 nfhp->nfh_fh,
3481 nfhp->nfh_len);
3482 NFSCL_DEBUG(4, "cblyp=%p\n",
3483 lyp);
3484 if (lyp != NULL &&
3485 (lyp->nfsly_flags &
3486 (NFSLY_FILES |
3487 NFSLY_FLEXFILE)) != 0 &&
3488 !NFSBCMP(stateid.other,
3489 lyp->nfsly_stateid.other,
3490 NFSX_STATEIDOTHER)) {
3491 error =
3492 nfscl_layoutrecall(
3493 recalltype,
3494 lyp, iomode, off,
3495 len, stateid.seqid,
3496 0, 0, NULL,
3497 recallp);
3498 recallp = NULL;
3499 wakeup(clp);
3500 NFSCL_DEBUG(4,
3501 "aft layrcal=%d\n",
3502 error);
3503 } else
3504 error =
3505 NFSERR_NOMATCHLAYOUT;
3506 } else
3507 error = NFSERR_NOMATCHLAYOUT;
3508 NFSUNLOCKCLSTATE();
3509 }
3510 free(nfhp, M_NFSFH);
3511 } else if (recalltype == NFSLAYOUTRETURN_FSID) {
3512 NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_HYPER);
3513 filesid[0] = fxdr_hyper(tl); tl += 2;
3514 filesid[1] = fxdr_hyper(tl); tl += 2;
3515 gotone = 0;
3516 NFSLOCKCLSTATE();
3517 clp = nfscl_getclntsess(sessionid);
3518 if (clp != NULL) {
3519 TAILQ_FOREACH(lyp, &clp->nfsc_layout,
3520 nfsly_list) {
3521 if (lyp->nfsly_filesid[0] ==
3522 filesid[0] &&
3523 lyp->nfsly_filesid[1] ==
3524 filesid[1]) {
3525 error =
3526 nfscl_layoutrecall(
3527 recalltype,
3528 lyp, iomode, 0,
3529 UINT64_MAX,
3530 lyp->nfsly_stateid.seqid,
3531 0, 0, NULL,
3532 recallp);
3533 recallp = NULL;
3534 gotone = 1;
3535 }
3536 }
3537 if (gotone != 0)
3538 wakeup(clp);
3539 else
3540 error = NFSERR_NOMATCHLAYOUT;
3541 } else
3542 error = NFSERR_NOMATCHLAYOUT;
3543 NFSUNLOCKCLSTATE();
3544 } else if (recalltype == NFSLAYOUTRETURN_ALL) {
3545 gotone = 0;
3546 NFSLOCKCLSTATE();
3547 clp = nfscl_getclntsess(sessionid);
3548 if (clp != NULL) {
3549 TAILQ_FOREACH(lyp, &clp->nfsc_layout,
3550 nfsly_list) {
3551 error = nfscl_layoutrecall(
3552 recalltype, lyp, iomode, 0,
3553 UINT64_MAX,
3554 lyp->nfsly_stateid.seqid,
3555 0, 0, NULL, recallp);
3556 recallp = NULL;
3557 gotone = 1;
3558 }
3559 if (gotone != 0)
3560 wakeup(clp);
3561 else
3562 error = NFSERR_NOMATCHLAYOUT;
3563 } else
3564 error = NFSERR_NOMATCHLAYOUT;
3565 NFSUNLOCKCLSTATE();
3566 } else
3567 error = NFSERR_NOMATCHLAYOUT;
3568 if (recallp != NULL) {
3569 free(recallp, M_NFSLAYRECALL);
3570 recallp = NULL;
3571 }
3572 break;
3573 case NFSV4OP_CBSEQUENCE:
3574 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
3575 5 * NFSX_UNSIGNED);
3576 bcopy(tl, sessionid, NFSX_V4SESSIONID);
3577 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3578 seqid = fxdr_unsigned(uint32_t, *tl++);
3579 slotid = fxdr_unsigned(uint32_t, *tl++);
3580 highslot = fxdr_unsigned(uint32_t, *tl++);
3581 cachethis = *tl++;
3582 /* Throw away the referring call stuff. */
3583 clist = fxdr_unsigned(int, *tl);
3584 for (j = 0; j < clist; j++) {
3585 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
3586 NFSX_UNSIGNED);
3587 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3588 rcalls = fxdr_unsigned(int, *tl);
3589 for (k = 0; k < rcalls; k++) {
3590 NFSM_DISSECT(tl, uint32_t *,
3591 2 * NFSX_UNSIGNED);
3592 }
3593 }
3594 NFSLOCKCLSTATE();
3595 if (i == 0) {
3596 clp = nfscl_getclntsess(sessionid);
3597 if (clp == NULL)
3598 error = NFSERR_SERVERFAULT;
3599 } else
3600 error = NFSERR_SEQUENCEPOS;
3601 if (error == 0) {
3602 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3603 error = nfsv4_seqsession(seqid, slotid,
3604 highslot, tsep->nfsess_cbslots, &rep,
3605 tsep->nfsess_backslots);
3606 }
3607 NFSUNLOCKCLSTATE();
3608 if (error == 0 || error == NFSERR_REPLYFROMCACHE) {
3609 gotseq_ok = 1;
3610 if (rep != NULL) {
3611 /*
3612 * Handle a reply for a retried
3613 * callback. The reply will be
3614 * re-inserted in the session cache
3615 * by the nfsv4_seqsess_cacherep() call
3616 * after out:
3617 */
3618 KASSERT(error == NFSERR_REPLYFROMCACHE,
3619 ("cbsequence: non-NULL rep"));
3620 NFSCL_DEBUG(4, "Got cbretry\n");
3621 m_freem(nd->nd_mreq);
3622 nd->nd_mreq = rep;
3623 rep = NULL;
3624 goto out;
3625 }
3626 NFSM_BUILD(tl, uint32_t *,
3627 NFSX_V4SESSIONID + 4 * NFSX_UNSIGNED);
3628 bcopy(sessionid, tl, NFSX_V4SESSIONID);
3629 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3630 *tl++ = txdr_unsigned(seqid);
3631 *tl++ = txdr_unsigned(slotid);
3632 *tl++ = txdr_unsigned(NFSV4_CBSLOTS - 1);
3633 *tl = txdr_unsigned(NFSV4_CBSLOTS - 1);
3634 }
3635 break;
3636 default:
3637 if (i == 0 && minorvers != NFSV4_MINORVERSION)
3638 error = NFSERR_OPNOTINSESS;
3639 else {
3640 NFSCL_DEBUG(1, "unsupp callback %d\n", op);
3641 error = NFSERR_NOTSUPP;
3642 }
3643 break;
3644 }
3645 if (error) {
3646 if (error == EBADRPC || error == NFSERR_BADXDR) {
3647 nd->nd_repstat = NFSERR_BADXDR;
3648 } else {
3649 nd->nd_repstat = error;
3650 }
3651 error = 0;
3652 }
3653 retops++;
3654 if (nd->nd_repstat) {
3655 *repp = nfscl_errmap(nd, minorvers);
3656 break;
3657 } else
3658 *repp = 0; /* NFS4_OK */
3659 }
3660 nfsmout:
3661 if (recallp != NULL)
3662 free(recallp, M_NFSLAYRECALL);
3663 if (error) {
3664 if (error == EBADRPC || error == NFSERR_BADXDR)
3665 nd->nd_repstat = NFSERR_BADXDR;
3666 else
3667 printf("nfsv4 comperr1=%d\n", error);
3668 }
3669 if (taglen == -1) {
3670 NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
3671 *tl++ = 0;
3672 *tl = 0;
3673 } else {
3674 *retopsp = txdr_unsigned(retops);
3675 }
3676 *nd->nd_errp = nfscl_errmap(nd, minorvers);
3677 out:
3678 if (gotseq_ok != 0) {
3679 rep = m_copym(nd->nd_mreq, 0, M_COPYALL, M_WAITOK);
3680 NFSLOCKCLSTATE();
3681 clp = nfscl_getclntsess(sessionid);
3682 if (clp != NULL) {
3683 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3684 nfsv4_seqsess_cacherep(slotid, tsep->nfsess_cbslots,
3685 NFSERR_OK, &rep);
3686 NFSUNLOCKCLSTATE();
3687 } else {
3688 NFSUNLOCKCLSTATE();
3689 m_freem(rep);
3690 }
3691 }
3692 }
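/*
 * Wire layout of the callback compound parsed above (a sketch of the
 * fields as dissected, not a complete XDR definition):
 *
 *	tag length, tag		echoed back in the reply
 *	minorversion		4.0, 4.1 or 4.2
 *	callback_ident		matched via nfscl_getclnt() for 4.0
 *	numops
 *	per op: opcode, then op-specific arguments, e.g. for
 *	    CB_RECALL: stateid, truncate flag, file handle
 *
 * For 4.1/4.2 the first op must be CB_SEQUENCE; another op appearing
 * first draws NFSERR_OPNOTINSESS and a CB_SEQUENCE that is not first
 * draws NFSERR_SEQUENCEPOS.
 */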
3693
3694 /*
3695 * Generate the next cbident value. Basically just increment a static value
3696 * and, once it has wrapped around, check that it isn't already in the list.
3697 */
3698 static u_int32_t
3699 nfscl_nextcbident(void)
3700 {
3701 struct nfsclclient *clp;
3702 int matched;
3703 static u_int32_t nextcbident = 0;
3704 static int haswrapped = 0;
3705
3706 nextcbident++;
3707 if (nextcbident == 0)
3708 haswrapped = 1;
3709 if (haswrapped) {
3710 /*
3711 * Search the clientid list for one already using this cbident.
3712 */
3713 do {
3714 matched = 0;
3715 NFSLOCKCLSTATE();
3716 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3717 if (clp->nfsc_cbident == nextcbident) {
3718 matched = 1;
3719 break;
3720 }
3721 }
3722 NFSUNLOCKCLSTATE();
3723 if (matched == 1)
3724 nextcbident++;
3725 } while (matched);
3726 }
3727 return (nextcbident);
3728 }
3729
3730 /*
3731 * Get the mount point related to a given cbident or session and busy it.
3732 */
3733 static mount_t
3734 nfscl_getmnt(int minorvers, uint8_t *sessionid, u_int32_t cbident,
3735 struct nfsclclient **clpp)
3736 {
3737 struct nfsclclient *clp;
3738 mount_t mp;
3739 int error;
3740 struct nfsclsession *tsep;
3741
3742 *clpp = NULL;
3743 NFSLOCKCLSTATE();
3744 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3745 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3746 if (minorvers == NFSV4_MINORVERSION) {
3747 if (clp->nfsc_cbident == cbident)
3748 break;
3749 } else if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
3750 NFSX_V4SESSIONID))
3751 break;
3752 }
3753 if (clp == NULL) {
3754 NFSUNLOCKCLSTATE();
3755 return (NULL);
3756 }
3757 mp = clp->nfsc_nmp->nm_mountp;
3758 vfs_ref(mp);
3759 NFSUNLOCKCLSTATE();
3760 error = vfs_busy(mp, 0);
3761 vfs_rel(mp);
3762 if (error != 0)
3763 return (NULL);
3764 *clpp = clp;
3765 return (mp);
3766 }
3767
3768 /*
3769 * Get the clientid pointer related to a given cbident.
3770 */
3771 static struct nfsclclient *
3772 nfscl_getclnt(u_int32_t cbident)
3773 {
3774 struct nfsclclient *clp;
3775
3776 LIST_FOREACH(clp, &nfsclhead, nfsc_list)
3777 if (clp->nfsc_cbident == cbident)
3778 break;
3779 return (clp);
3780 }
3781
3782 /*
3783 * Get the clientid pointer related to a given sessionid.
3784 */
3785 static struct nfsclclient *
3786 nfscl_getclntsess(uint8_t *sessionid)
3787 {
3788 struct nfsclclient *clp;
3789 struct nfsclsession *tsep;
3790
3791 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3792 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3793 if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
3794 NFSX_V4SESSIONID))
3795 break;
3796 }
3797 return (clp);
3798 }
3799
3800 /*
3801 * Search for a lock conflict locally on the client. A conflict occurs if
3802 * - the owners differ, the byte ranges overlap, and at least one of
3803 * them is a write lock or the request is an unlock.
3804 */
3805 static int
3806 nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen,
3807 struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp,
3808 struct nfscllock **lopp)
3809 {
3810 struct nfsclowner *owp;
3811 struct nfsclopen *op;
3812 int ret;
3813
3814 if (dp != NULL) {
3815 ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp);
3816 if (ret)
3817 return (ret);
3818 }
3819 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3820 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3821 if (op->nfso_fhlen == fhlen &&
3822 !NFSBCMP(op->nfso_fh, fhp, fhlen)) {
3823 ret = nfscl_checkconflict(&op->nfso_lock, nlop,
3824 own, lopp);
3825 if (ret)
3826 return (ret);
3827 }
3828 }
3829 }
3830 return (0);
3831 }
3832
3833 static int
3834 nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop,
3835 u_int8_t *own, struct nfscllock **lopp)
3836 {
3837 struct nfscllockowner *lp;
3838 struct nfscllock *lop;
3839
3840 LIST_FOREACH(lp, lhp, nfsl_list) {
3841 if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
3842 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
3843 if (lop->nfslo_first >= nlop->nfslo_end)
3844 break;
3845 if (lop->nfslo_end <= nlop->nfslo_first)
3846 continue;
3847 if (lop->nfslo_type == F_WRLCK ||
3848 nlop->nfslo_type == F_WRLCK ||
3849 nlop->nfslo_type == F_UNLCK) {
3850 if (lopp != NULL)
3851 *lopp = lop;
3852 return (NFSERR_DENIED);
3853 }
3854 }
3855 }
3856 }
3857 return (0);
3858 }
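
/*
 * Worked example for the overlap test above (illustrative offsets):
 * lock ranges are half-open intervals [nfslo_first, nfslo_end), so an
 * existing write lock on [100, 200) conflicts with a request for
 * [150, 250) from a different owner, but not with one for [200, 300),
 * since the end offset is exclusive. The early "break" assumes each
 * lock list is kept ordered by increasing nfslo_first, so no later
 * entry can overlap either.
 */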
3859
3860 /*
3861 * Check for a local conflicting lock.
3862 */
3863 int
3864 nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off,
3865 u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags)
3866 {
3867 struct nfscllock *lop, nlck;
3868 struct nfscldeleg *dp;
3869 struct nfsnode *np;
3870 u_int8_t own[NFSV4CL_LOCKNAMELEN];
3871 int error;
3872
3873 nlck.nfslo_type = fl->l_type;
3874 nlck.nfslo_first = off;
3875 if (len == NFS64BITSSET) {
3876 nlck.nfslo_end = NFS64BITSSET;
3877 } else {
3878 nlck.nfslo_end = off + len;
3879 if (nlck.nfslo_end <= nlck.nfslo_first)
3880 return (NFSERR_INVAL);
3881 }
3882 np = VTONFS(vp);
3883 nfscl_filllockowner(id, own, flags);
3884 NFSLOCKCLSTATE();
3885 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
3886 error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
3887 &nlck, own, dp, &lop);
3888 if (error != 0) {
3889 fl->l_whence = SEEK_SET;
3890 fl->l_start = lop->nfslo_first;
3891 if (lop->nfslo_end == NFS64BITSSET)
3892 fl->l_len = 0;
3893 else
3894 fl->l_len = lop->nfslo_end - lop->nfslo_first;
3895 fl->l_pid = (pid_t)0;
3896 fl->l_type = lop->nfslo_type;
3897 error = -1; /* no RPC required */
3898 } else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) ||
3899 fl->l_type == F_RDLCK)) {
3900 /*
3901 * The delegation ensures that there isn't a conflicting
3902 * lock on the server, so return -1 to indicate an RPC
3903 * isn't required.
3904 */
3905 fl->l_type = F_UNLCK;
3906 error = -1;
3907 }
3908 NFSUNLOCKCLSTATE();
3909 return (error);
3910 }
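
/*
 * Usage sketch (illustrative, not the verbatim advlock code): a return
 * of -1 means fl has been filled in from local state or a delegation
 * and no over-the-wire LockT is needed, while 0 means the server must
 * still be queried before the result in fl can be trusted.
 */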
3911
3912 /*
3913 * Handle Recall of a delegation.
3914 * The clp must be exclusively locked when this is called.
3915 */
3916 static int
3917 nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp,
3918 struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p,
3919 int called_from_renewthread)
3920 {
3921 struct nfsclowner *owp, *lowp, *nowp;
3922 struct nfsclopen *op, *lop;
3923 struct nfscllockowner *lp;
3924 struct nfscllock *lckp;
3925 struct nfsnode *np;
3926 int error = 0, ret, gotvp = 0;
3927
3928 if (vp == NULL) {
3929 /*
3930 * First, get a vnode for the file. This is needed to do RPCs.
3931 */
3932 ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh,
3933 dp->nfsdl_fhlen, p, &np);
3934 if (ret) {
3935 /*
3936 * File isn't open, so nothing to move over to the
3937 * server.
3938 */
3939 return (0);
3940 }
3941 vp = NFSTOV(np);
3942 gotvp = 1;
3943 } else {
3944 np = VTONFS(vp);
3945 }
3946 dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET;
3947
3948 /*
3949 * Ok, if it's a write delegation, flush data to the server, so
3950 * that close/open consistency is retained.
3951 */
3952 ret = 0;
3953 NFSLOCKNODE(np);
3954 if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) {
3955 np->n_flag |= NDELEGRECALL;
3956 NFSUNLOCKNODE(np);
3957 ret = ncl_flush(vp, MNT_WAIT, p, 1, called_from_renewthread);
3958 NFSLOCKNODE(np);
3959 np->n_flag &= ~NDELEGRECALL;
3960 }
3961 NFSINVALATTRCACHE(np);
3962 NFSUNLOCKNODE(np);
3963 if (ret == EIO && called_from_renewthread != 0) {
3964 /*
3965 * If the flush failed with EIO for the renew thread,
3966 * return now, so that the dirty buffer will be flushed
3967 * later.
3968 */
3969 if (gotvp != 0)
3970 vrele(vp);
3971 return (ret);
3972 }
3973
3974 /*
3975 * Now, for each openowner with opens issued locally, move them
3976 * over to state against the server.
3977 */
3978 LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) {
3979 lop = LIST_FIRST(&lowp->nfsow_open);
3980 if (lop != NULL) {
3981 if (LIST_NEXT(lop, nfso_list) != NULL)
3982 panic("nfsdlg mult opens");
3983 /*
3984 * Look for the same openowner against the server.
3985 */
3986 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3987 if (!NFSBCMP(lowp->nfsow_owner,
3988 owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
3989 newnfs_copycred(&dp->nfsdl_cred, cred);
3990 ret = nfscl_moveopen(vp, clp, nmp, lop,
3991 owp, dp, cred, p);
3992 if (ret == NFSERR_STALECLIENTID ||
3993 ret == NFSERR_STALEDONTRECOVER ||
3994 ret == NFSERR_BADSESSION) {
3995 if (gotvp)
3996 vrele(vp);
3997 return (ret);
3998 }
3999 if (ret) {
4000 nfscl_freeopen(lop, 1);
4001 if (!error)
4002 error = ret;
4003 }
4004 break;
4005 }
4006 }
4007
4008 /*
4009 * If no openowner found, create one and get an open
4010 * for it.
4011 */
4012 if (owp == NULL) {
4013 nowp = malloc(
4014 sizeof (struct nfsclowner), M_NFSCLOWNER,
4015 M_WAITOK);
4016 nfscl_newopen(clp, NULL, &owp, &nowp, &op,
4017 NULL, lowp->nfsow_owner, dp->nfsdl_fh,
4018 dp->nfsdl_fhlen, NULL, NULL);
4019 newnfs_copycred(&dp->nfsdl_cred, cred);
4020 ret = nfscl_moveopen(vp, clp, nmp, lop,
4021 owp, dp, cred, p);
4022 if (ret) {
4023 nfscl_freeopenowner(owp, 0);
4024 if (ret == NFSERR_STALECLIENTID ||
4025 ret == NFSERR_STALEDONTRECOVER ||
4026 ret == NFSERR_BADSESSION) {
4027 if (gotvp)
4028 vrele(vp);
4029 return (ret);
4030 }
4031 if (ret) {
4032 nfscl_freeopen(lop, 1);
4033 if (!error)
4034 error = ret;
4035 }
4036 }
4037 }
4038 }
4039 }
4040
4041 /*
4042 * Now, get byte range locks for any locks done locally.
4043 */
4044 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4045 LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) {
4046 newnfs_copycred(&dp->nfsdl_cred, cred);
4047 ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p);
4048 if (ret == NFSERR_STALESTATEID ||
4049 ret == NFSERR_STALEDONTRECOVER ||
4050 ret == NFSERR_STALECLIENTID ||
4051 ret == NFSERR_BADSESSION) {
4052 if (gotvp)
4053 vrele(vp);
4054 return (ret);
4055 }
4056 if (ret && !error)
4057 error = ret;
4058 }
4059 }
4060 if (gotvp)
4061 vrele(vp);
4062 return (error);
4063 }
4064
4065 /*
4066 * Move a locally issued open over to an owner on the state list.
4067 * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and
4068 * returns with it unlocked.
4069 */
4070 static int
4071 nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
4072 struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp,
4073 struct ucred *cred, NFSPROC_T *p)
4074 {
4075 struct nfsclopen *op, *nop;
4076 struct nfscldeleg *ndp;
4077 struct nfsnode *np;
4078 int error = 0, newone;
4079
4080 /*
4081 * First, look for an appropriate open. If found, just increment the
4082 * opencnt in it.
4083 */
4084 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
4085 if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode &&
4086 op->nfso_fhlen == lop->nfso_fhlen &&
4087 !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) {
4088 op->nfso_opencnt += lop->nfso_opencnt;
4089 nfscl_freeopen(lop, 1);
4090 return (0);
4091 }
4092 }
4093
4094 /* No appropriate open, so we have to do one against the server. */
4095 np = VTONFS(vp);
4096 nop = malloc(sizeof (struct nfsclopen) +
4097 lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
4098 newone = 0;
4099 nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner,
4100 lop->nfso_fh, lop->nfso_fhlen, cred, &newone);
4101 ndp = dp;
4102 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen,
4103 lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op,
4104 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p);
4105 if (error) {
4106 if (newone)
4107 nfscl_freeopen(op, 0);
4108 } else {
4109 op->nfso_mode |= lop->nfso_mode;
4110 op->nfso_opencnt += lop->nfso_opencnt;
4111 nfscl_freeopen(lop, 1);
4112 }
4113 if (nop != NULL)
4114 free(nop, M_NFSCLOPEN);
4115 if (ndp != NULL) {
4116 /*
4117 * What should I do with the returned delegation, since the
4118 * delegation is being recalled? For now, just printf and
4119 * throw it away.
4120 */
4121 printf("Moveopen returned deleg\n");
4122 free(ndp, M_NFSCLDELEG);
4123 }
4124 return (error);
4125 }
4126
4127 /*
4128 * Recall all delegations on this client.
4129 */
4130 static void
4131 nfscl_totalrecall(struct nfsclclient *clp)
4132 {
4133 struct nfscldeleg *dp;
4134
4135 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
4136 if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0)
4137 dp->nfsdl_flags |= NFSCLDL_RECALL;
4138 }
4139 }
4140
4141 /*
4142 * Relock byte ranges. Called for delegation recall and state expiry.
4143 */
4144 static int
4145 nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
4146 struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred,
4147 NFSPROC_T *p)
4148 {
4149 struct nfscllockowner *nlp;
4150 struct nfsfh *nfhp;
4151 u_int64_t off, len;
4152 int error, newone, donelocally;
4153
4154 off = lop->nfslo_first;
4155 len = lop->nfslo_end - lop->nfslo_first;
4156 error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p,
4157 clp, 1, NULL, lp->nfsl_lockflags, lp->nfsl_owner,
4158 lp->nfsl_openowner, &nlp, &newone, &donelocally);
4159 if (error || donelocally)
4160 return (error);
4161 nfhp = VTONFS(vp)->n_fhp;
4162 error = nfscl_trylock(nmp, vp, nfhp->nfh_fh,
4163 nfhp->nfh_len, nlp, newone, 0, off,
4164 len, lop->nfslo_type, cred, p);
4165 if (error)
4166 nfscl_freelockowner(nlp, 0);
4167 return (error);
4168 }
4169
4170 /*
4171 * Called to re-open a file. Basically get a vnode for the file handle
4172 * and then call nfscl_tryopen() to do the rest.
4173 */
4174 static int
4175 nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen,
4176 u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp,
4177 struct ucred *cred, NFSPROC_T *p)
4178 {
4179 struct nfsnode *np;
4180 vnode_t vp;
4181 int error;
4182
4183 error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np);
4184 if (error)
4185 return (error);
4186 vp = NFSTOV(np);
4187 if (np->n_v4 != NULL) {
4188 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
4189 np->n_v4->n4_fhlen, fhp, fhlen, mode, op,
4190 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0,
4191 cred, p);
4192 } else {
4193 error = EINVAL;
4194 }
4195 vrele(vp);
4196 return (error);
4197 }
4198
4199 /*
4200 * Try an open against the server. Just call nfsrpc_openrpc(), retrying while
4201 * NFSERR_DELAY. Also, try system credentials if the passed-in
4202 * credentials fail.
4203 */
4204 static int
4205 nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
4206 u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op,
4207 u_int8_t *name, int namelen, struct nfscldeleg **ndpp,
4208 int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p)
4209 {
4210 int error;
4211
4212 do {
4213 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen,
4214 mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p,
4215 0, 0);
4216 if (error == NFSERR_DELAY)
4217 (void) nfs_catnap(PZERO, error, "nfstryop");
4218 } while (error == NFSERR_DELAY);
4219 if (error == EAUTH || error == EACCES) {
4220 /* Try again using system credentials */
4221 newnfs_setroot(cred);
4222 do {
4223 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp,
4224 newfhlen, mode, op, name, namelen, ndpp, reclaim,
4225 delegtype, cred, p, 1, 0);
4226 if (error == NFSERR_DELAY)
4227 (void) nfs_catnap(PZERO, error, "nfstryop");
4228 } while (error == NFSERR_DELAY);
4229 }
4230 return (error);
4231 }
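
/*
 * The nfscl_try*() helpers here all follow the same pattern: loop with
 * nfs_catnap() while the server replies NFSERR_DELAY and then, if the
 * caller's credentials got EAUTH or EACCES, retry once more as root
 * via newnfs_setroot(). A minimal sketch of the pattern, with
 * nfsrpc_xxxrpc() standing in for the RPC op:
 *
 *	do {
 *		error = nfsrpc_xxxrpc(...);
 *		if (error == NFSERR_DELAY)
 *			(void) nfs_catnap(PZERO, error, "nfstryxx");
 *	} while (error == NFSERR_DELAY);
 */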
4232
4233 /*
4234 * Try a byte range lock. Just loop on nfsrpc_lock() while it returns
4235 * NFSERR_DELAY. Also, retry with system credentials if the provided
4236 * credentials don't work.
4237 */
4238 static int
4239 nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp,
4240 int fhlen, struct nfscllockowner *nlp, int newone, int reclaim,
4241 u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p)
4242 {
4243 struct nfsrv_descript nfsd, *nd = &nfsd;
4244 int error;
4245
4246 do {
4247 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone,
4248 reclaim, off, len, type, cred, p, 0);
4249 if (!error && nd->nd_repstat == NFSERR_DELAY)
4250 (void) nfs_catnap(PZERO, (int)nd->nd_repstat,
4251 "nfstrylck");
4252 } while (!error && nd->nd_repstat == NFSERR_DELAY);
4253 if (!error)
4254 error = nd->nd_repstat;
4255 if (error == EAUTH || error == EACCES) {
4256 /* Try again using root credentials */
4257 newnfs_setroot(cred);
4258 do {
4259 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp,
4260 newone, reclaim, off, len, type, cred, p, 1);
4261 if (!error && nd->nd_repstat == NFSERR_DELAY)
4262 (void) nfs_catnap(PZERO, (int)nd->nd_repstat,
4263 "nfstrylck");
4264 } while (!error && nd->nd_repstat == NFSERR_DELAY);
4265 if (!error)
4266 error = nd->nd_repstat;
4267 }
4268 return (error);
4269 }
4270
4271 /*
4272 * Try a delegreturn against the server. Just call nfsrpc_delegreturn(),
4273 * retrying while NFSERR_DELAY. Also, try system credentials if the
4274 * passed-in credentials fail.
4275 */
4276 static int
4277 nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred,
4278 struct nfsmount *nmp, NFSPROC_T *p)
4279 {
4280 int error;
4281
4282 do {
4283 error = nfsrpc_delegreturn(dp, cred, nmp, p, 0);
4284 if (error == NFSERR_DELAY)
4285 (void) nfs_catnap(PZERO, error, "nfstrydp");
4286 } while (error == NFSERR_DELAY);
4287 if (error == EAUTH || error == EACCES) {
4288 /* Try again using system credentials */
4289 newnfs_setroot(cred);
4290 do {
4291 error = nfsrpc_delegreturn(dp, cred, nmp, p, 1);
4292 if (error == NFSERR_DELAY)
4293 (void) nfs_catnap(PZERO, error, "nfstrydp");
4294 } while (error == NFSERR_DELAY);
4295 }
4296 return (error);
4297 }
4298
4299 /*
4300 * Try a close against the server. Just call nfsrpc_closerpc(),
4301 * retrying while NFSERR_DELAY. Also, try system credentials if the
4302 * passed-in credentials fail.
4303 */
4304 int
4305 nfscl_tryclose(struct nfsclopen *op, struct ucred *cred,
4306 struct nfsmount *nmp, NFSPROC_T *p)
4307 {
4308 struct nfsrv_descript nfsd, *nd = &nfsd;
4309 int error;
4310
4311 do {
4312 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0);
4313 if (error == NFSERR_DELAY)
4314 (void) nfs_catnap(PZERO, error, "nfstrycl");
4315 } while (error == NFSERR_DELAY);
4316 if (error == EAUTH || error == EACCES) {
4317 /* Try again using system credentials */
4318 newnfs_setroot(cred);
4319 do {
4320 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1);
4321 if (error == NFSERR_DELAY)
4322 (void) nfs_catnap(PZERO, error, "nfstrycl");
4323 } while (error == NFSERR_DELAY);
4324 }
4325 return (error);
4326 }
4327
4328 /*
4329 * Decide if a delegation on a file permits close without flushing writes
4330 * to the server. This might be a big performance win in some environments.
4331 * (Not useful until the client does caching on local stable storage.)
4332 */
4333 int
4334 nfscl_mustflush(vnode_t vp)
4335 {
4336 struct nfsclclient *clp;
4337 struct nfscldeleg *dp;
4338 struct nfsnode *np;
4339 struct nfsmount *nmp;
4340
4341 np = VTONFS(vp);
4342 nmp = VFSTONFS(vp->v_mount);
4343 if (!NFSHASNFSV4(nmp))
4344 return (1);
4345 NFSLOCKCLSTATE();
4346 clp = nfscl_findcl(nmp);
4347 if (clp == NULL) {
4348 NFSUNLOCKCLSTATE();
4349 return (1);
4350 }
4351 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4352 if (dp != NULL && (dp->nfsdl_flags &
4353 (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) ==
4354 NFSCLDL_WRITE &&
4355 (dp->nfsdl_sizelimit >= np->n_size ||
4356 !NFSHASSTRICT3530(nmp))) {
4357 NFSUNLOCKCLSTATE();
4358 return (0);
4359 }
4360 NFSUNLOCKCLSTATE();
4361 return (1);
4362 }
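
/*
 * Example use (a sketch, not the exact close-path code): a caller can
 * skip flushing dirty buffers when a write delegation makes the flush
 * unnecessary:
 *
 *	if (nfscl_mustflush(vp) != 0)
 *		(void) ncl_flush(vp, MNT_WAIT, p, 1, 0);
 */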
4363
4364 /*
4365 * See if a (write) delegation exists for this file.
4366 */
4367 int
4368 nfscl_nodeleg(vnode_t vp, int writedeleg)
4369 {
4370 struct nfsclclient *clp;
4371 struct nfscldeleg *dp;
4372 struct nfsnode *np;
4373 struct nfsmount *nmp;
4374
4375 np = VTONFS(vp);
4376 nmp = VFSTONFS(vp->v_mount);
4377 if (!NFSHASNFSV4(nmp))
4378 return (1);
4379 NFSLOCKCLSTATE();
4380 clp = nfscl_findcl(nmp);
4381 if (clp == NULL) {
4382 NFSUNLOCKCLSTATE();
4383 return (1);
4384 }
4385 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4386 if (dp != NULL &&
4387 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 &&
4388 (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) ==
4389 NFSCLDL_WRITE)) {
4390 NFSUNLOCKCLSTATE();
4391 return (0);
4392 }
4393 NFSUNLOCKCLSTATE();
4394 return (1);
4395 }
4396
4397 /*
4398 * Look for an associated delegation that should be DelegReturned.
4399 */
4400 int
4401 nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp)
4402 {
4403 struct nfsclclient *clp;
4404 struct nfscldeleg *dp;
4405 struct nfsclowner *owp;
4406 struct nfscllockowner *lp;
4407 struct nfsmount *nmp;
4408 struct ucred *cred;
4409 struct nfsnode *np;
4410 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
4411
4412 nmp = VFSTONFS(vp->v_mount);
4413 np = VTONFS(vp);
4414 NFSLOCKCLSTATE();
4415 /*
4416 * Loop around waiting for:
4417 * - outstanding I/O operations on delegations to complete
4418 * - for a delegation on vp that has state, lock the client and
4419 * do a recall
4420 * - return delegation with no state
4421 */
4422 while (1) {
4423 clp = nfscl_findcl(nmp);
4424 if (clp == NULL) {
4425 NFSUNLOCKCLSTATE();
4426 return (retcnt);
4427 }
4428 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4429 np->n_fhp->nfh_len);
4430 if (dp != NULL) {
4431 /*
4432 * Wait for outstanding I/O ops to be done.
4433 */
4434 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4435 if (igotlock) {
4436 nfsv4_unlock(&clp->nfsc_lock, 0);
4437 igotlock = 0;
4438 }
4439 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4440 (void) nfsmsleep(&dp->nfsdl_rwlock,
4441 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
4442 continue;
4443 }
4444 needsrecall = 0;
4445 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4446 if (!LIST_EMPTY(&owp->nfsow_open)) {
4447 needsrecall = 1;
4448 break;
4449 }
4450 }
4451 if (!needsrecall) {
4452 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4453 if (!LIST_EMPTY(&lp->nfsl_lock)) {
4454 needsrecall = 1;
4455 break;
4456 }
4457 }
4458 }
4459 if (needsrecall && !triedrecall) {
4460 dp->nfsdl_flags |= NFSCLDL_DELEGRET;
4461 islept = 0;
4462 while (!igotlock) {
4463 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
4464 &islept, NFSCLSTATEMUTEXPTR, NULL);
4465 if (islept)
4466 break;
4467 }
4468 if (islept)
4469 continue;
4470 NFSUNLOCKCLSTATE();
4471 cred = newnfs_getcred();
4472 newnfs_copycred(&dp->nfsdl_cred, cred);
4473 (void) nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0);
4474 NFSFREECRED(cred);
4475 triedrecall = 1;
4476 NFSLOCKCLSTATE();
4477 nfsv4_unlock(&clp->nfsc_lock, 0);
4478 igotlock = 0;
4479 continue;
4480 }
4481 *stp = dp->nfsdl_stateid;
4482 retcnt = 1;
4483 nfscl_cleandeleg(dp);
4484 nfscl_freedeleg(&clp->nfsc_deleg, dp);
4485 }
4486 if (igotlock)
4487 nfsv4_unlock(&clp->nfsc_lock, 0);
4488 NFSUNLOCKCLSTATE();
4489 return (retcnt);
4490 }
4491 }
4492
4493 /*
4494 * Look for associated delegation(s) that should be DelegReturned.
4495 */
4496 int
4497 nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp,
4498 nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p)
4499 {
4500 struct nfsclclient *clp;
4501 struct nfscldeleg *dp;
4502 struct nfsclowner *owp;
4503 struct nfscllockowner *lp;
4504 struct nfsmount *nmp;
4505 struct ucred *cred;
4506 struct nfsnode *np;
4507 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
4508
4509 nmp = VFSTONFS(fvp->v_mount);
4510 *gotfdp = 0;
4511 *gottdp = 0;
4512 NFSLOCKCLSTATE();
4513 /*
4514 * Loop around waiting for:
4515 * - outstanding I/O operations on delegations to complete
4516 * - for a delegation on fvp that has state, lock the client and
4517 * do a recall
4518 * - return delegation(s) with no state.
4519 */
4520 while (1) {
4521 clp = nfscl_findcl(nmp);
4522 if (clp == NULL) {
4523 NFSUNLOCKCLSTATE();
4524 return (retcnt);
4525 }
4526 np = VTONFS(fvp);
4527 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4528 np->n_fhp->nfh_len);
4529 if (dp != NULL && *gotfdp == 0) {
4530 /*
4531 * Wait for outstanding I/O ops to be done.
4532 */
4533 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4534 if (igotlock) {
4535 nfsv4_unlock(&clp->nfsc_lock, 0);
4536 igotlock = 0;
4537 }
4538 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4539 (void) nfsmsleep(&dp->nfsdl_rwlock,
4540 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
4541 continue;
4542 }
4543 needsrecall = 0;
4544 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4545 if (!LIST_EMPTY(&owp->nfsow_open)) {
4546 needsrecall = 1;
4547 break;
4548 }
4549 }
4550 if (!needsrecall) {
4551 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4552 if (!LIST_EMPTY(&lp->nfsl_lock)) {
4553 needsrecall = 1;
4554 break;
4555 }
4556 }
4557 }
4558 if (needsrecall && !triedrecall) {
4559 dp->nfsdl_flags |= NFSCLDL_DELEGRET;
4560 islept = 0;
4561 while (!igotlock) {
4562 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
4563 &islept, NFSCLSTATEMUTEXPTR, NULL);
4564 if (islept)
4565 break;
4566 }
4567 if (islept)
4568 continue;
4569 NFSUNLOCKCLSTATE();
4570 cred = newnfs_getcred();
4571 newnfs_copycred(&dp->nfsdl_cred, cred);
4572 (void) nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0);
4573 NFSFREECRED(cred);
4574 triedrecall = 1;
4575 NFSLOCKCLSTATE();
4576 nfsv4_unlock(&clp->nfsc_lock, 0);
4577 igotlock = 0;
4578 continue;
4579 }
4580 *fstp = dp->nfsdl_stateid;
4581 retcnt++;
4582 *gotfdp = 1;
4583 nfscl_cleandeleg(dp);
4584 nfscl_freedeleg(&clp->nfsc_deleg, dp);
4585 }
4586 if (igotlock) {
4587 nfsv4_unlock(&clp->nfsc_lock, 0);
4588 igotlock = 0;
4589 }
4590 if (tvp != NULL) {
4591 np = VTONFS(tvp);
4592 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4593 np->n_fhp->nfh_len);
4594 if (dp != NULL && *gottdp == 0) {
4595 /*
4596 * Wait for outstanding I/O ops to be done.
4597 */
4598 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4599 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4600 (void) nfsmsleep(&dp->nfsdl_rwlock,
4601 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
4602 continue;
4603 }
4604 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4605 if (!LIST_EMPTY(&owp->nfsow_open)) {
4606 NFSUNLOCKCLSTATE();
4607 return (retcnt);
4608 }
4609 }
4610 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4611 if (!LIST_EMPTY(&lp->nfsl_lock)) {
4612 NFSUNLOCKCLSTATE();
4613 return (retcnt);
4614 }
4615 }
4616 *tstp = dp->nfsdl_stateid;
4617 retcnt++;
4618 *gottdp = 1;
4619 nfscl_cleandeleg(dp);
4620 nfscl_freedeleg(&clp->nfsc_deleg, dp);
4621 }
4622 }
4623 NFSUNLOCKCLSTATE();
4624 return (retcnt);
4625 }
4626 }
4627
4628 /*
4629 * Get a reference on the clientid associated with the mount point.
4630 * Return 1 on success, 0 otherwise.
4631 */
4632 int
4633 nfscl_getref(struct nfsmount *nmp)
4634 {
4635 struct nfsclclient *clp;
4636
4637 NFSLOCKCLSTATE();
4638 clp = nfscl_findcl(nmp);
4639 if (clp == NULL) {
4640 NFSUNLOCKCLSTATE();
4641 return (0);
4642 }
4643 nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, NULL);
4644 NFSUNLOCKCLSTATE();
4645 return (1);
4646 }
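
/*
 * A successful call must be paired with nfscl_relref() below, e.g.
 * (sketch):
 *
 *	if (nfscl_getref(nmp) != 0) {
 *		(do work that needs the clientid to stay around)
 *		nfscl_relref(nmp);
 *	}
 */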
4647
4648 /*
4649 * Release a reference on a clientid acquired with the above call.
4650 */
4651 void
4652 nfscl_relref(struct nfsmount *nmp)
4653 {
4654 struct nfsclclient *clp;
4655
4656 NFSLOCKCLSTATE();
4657 clp = nfscl_findcl(nmp);
4658 if (clp == NULL) {
4659 NFSUNLOCKCLSTATE();
4660 return;
4661 }
4662 nfsv4_relref(&clp->nfsc_lock);
4663 NFSUNLOCKCLSTATE();
4664 }
4665
4666 /*
4667 * Save the size attribute in the delegation, since the nfsnode
4668 * is going away.
4669 */
4670 void
4671 nfscl_reclaimnode(vnode_t vp)
4672 {
4673 struct nfsclclient *clp;
4674 struct nfscldeleg *dp;
4675 struct nfsnode *np = VTONFS(vp);
4676 struct nfsmount *nmp;
4677
4678 nmp = VFSTONFS(vp->v_mount);
4679 if (!NFSHASNFSV4(nmp))
4680 return;
4681 NFSLOCKCLSTATE();
4682 clp = nfscl_findcl(nmp);
4683 if (clp == NULL) {
4684 NFSUNLOCKCLSTATE();
4685 return;
4686 }
4687 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4688 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
4689 dp->nfsdl_size = np->n_size;
4690 NFSUNLOCKCLSTATE();
4691 }
4692
4693 /*
4694 * Get the size attribute saved in the delegation, since this is a
4695 * newly allocated nfsnode.
4696 */
4697 void
4698 nfscl_newnode(vnode_t vp)
4699 {
4700 struct nfsclclient *clp;
4701 struct nfscldeleg *dp;
4702 struct nfsnode *np = VTONFS(vp);
4703 struct nfsmount *nmp;
4704
4705 nmp = VFSTONFS(vp->v_mount);
4706 if (!NFSHASNFSV4(nmp))
4707 return;
4708 NFSLOCKCLSTATE();
4709 clp = nfscl_findcl(nmp);
4710 if (clp == NULL) {
4711 NFSUNLOCKCLSTATE();
4712 return;
4713 }
4714 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4715 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
4716 np->n_size = dp->nfsdl_size;
4717 NFSUNLOCKCLSTATE();
4718 }
4719
4720 /*
4721 * If there is a valid write delegation for this file, set the modtime
4722 * to the local clock time.
4723 */
4724 void
4725 nfscl_delegmodtime(vnode_t vp)
4726 {
4727 struct nfsclclient *clp;
4728 struct nfscldeleg *dp;
4729 struct nfsnode *np = VTONFS(vp);
4730 struct nfsmount *nmp;
4731
4732 nmp = VFSTONFS(vp->v_mount);
4733 if (!NFSHASNFSV4(nmp))
4734 return;
4735 NFSLOCKCLSTATE();
4736 clp = nfscl_findcl(nmp);
4737 if (clp == NULL) {
4738 NFSUNLOCKCLSTATE();
4739 return;
4740 }
4741 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4742 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) {
4743 nanotime(&dp->nfsdl_modtime);
4744 dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
4745 }
4746 NFSUNLOCKCLSTATE();
4747 }
4748
4749 /*
4750 * If there is a valid write delegation for this file with a modtime set,
4751 * put that modtime in mtime.
4752 */
4753 void
4754 nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime)
4755 {
4756 struct nfsclclient *clp;
4757 struct nfscldeleg *dp;
4758 struct nfsnode *np = VTONFS(vp);
4759 struct nfsmount *nmp;
4760
4761 nmp = VFSTONFS(vp->v_mount);
4762 if (!NFSHASNFSV4(nmp))
4763 return;
4764 NFSLOCKCLSTATE();
4765 clp = nfscl_findcl(nmp);
4766 if (clp == NULL) {
4767 NFSUNLOCKCLSTATE();
4768 return;
4769 }
4770 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4771 if (dp != NULL &&
4772 (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) ==
4773 (NFSCLDL_WRITE | NFSCLDL_MODTIMESET))
4774 *mtime = dp->nfsdl_modtime;
4775 NFSUNLOCKCLSTATE();
4776 }
4777
4778 static int
4779 nfscl_errmap(struct nfsrv_descript *nd, u_int32_t minorvers)
4780 {
4781 short *defaulterrp, *errp;
4782
4783 if (!nd->nd_repstat)
4784 return (0);
4785 if (nd->nd_procnum == NFSPROC_NOOP)
4786 return (txdr_unsigned(nd->nd_repstat & 0xffff));
4787 if (nd->nd_repstat == EBADRPC)
4788 return (txdr_unsigned(NFSERR_BADXDR));
4789 if (nd->nd_repstat == NFSERR_MINORVERMISMATCH ||
4790 nd->nd_repstat == NFSERR_OPILLEGAL)
4791 return (txdr_unsigned(nd->nd_repstat));
4792 if (nd->nd_repstat >= NFSERR_BADIOMODE && nd->nd_repstat < 20000 &&
4793 minorvers > NFSV4_MINORVERSION) {
4794 /* NFSv4.n error. */
4795 return (txdr_unsigned(nd->nd_repstat));
4796 }
4797 if (nd->nd_procnum < NFSV4OP_CBNOPS)
4798 errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum];
4799 else
4800 return (txdr_unsigned(nd->nd_repstat));
4801 while (*++errp)
4802 if (*errp == (short)nd->nd_repstat)
4803 return (txdr_unsigned(nd->nd_repstat));
4804 return (txdr_unsigned(*defaulterrp));
4805 }
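
/*
 * Each nfscl_cberrmap[] row consulted above is a zero-terminated list:
 * entry 0 is the default error substituted when no match is found and
 * the remaining entries are the errors that may be returned unchanged
 * for that callback operation.
 */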
4806
4807 /*
4808 * Called to find/add a layout to a client.
4809 * This function returns the layout with a refcnt (shared lock) upon
4810 * success (returns 0) or with no lock/refcnt on the layout when an
4811 * error is returned.
4812 * If a layout is passed in via lypp, it is exclusively locked.
4813 */
4814 int
4815 nfscl_layout(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
4816 nfsv4stateid_t *stateidp, int layouttype, int retonclose,
4817 struct nfsclflayouthead *fhlp, struct nfscllayout **lypp,
4818 struct ucred *cred, NFSPROC_T *p)
4819 {
4820 struct nfsclclient *clp;
4821 struct nfscllayout *lyp, *tlyp;
4822 struct nfsclflayout *flp;
4823 struct nfsnode *np = VTONFS(vp);
4824 mount_t mp;
4825 int layout_passed_in;
4826
4827 mp = nmp->nm_mountp;
4828 layout_passed_in = 1;
4829 tlyp = NULL;
4830 lyp = *lypp;
4831 if (lyp == NULL) {
4832 layout_passed_in = 0;
4833 tlyp = malloc(sizeof(*tlyp) + fhlen - 1, M_NFSLAYOUT,
4834 M_WAITOK | M_ZERO);
4835 }
4836
4837 NFSLOCKCLSTATE();
4838 clp = nmp->nm_clp;
4839 if (clp == NULL) {
4840 if (layout_passed_in != 0)
4841 nfsv4_unlock(&lyp->nfsly_lock, 0);
4842 NFSUNLOCKCLSTATE();
4843 if (tlyp != NULL)
4844 free(tlyp, M_NFSLAYOUT);
4845 return (EPERM);
4846 }
4847 if (lyp == NULL) {
4848 /*
4849 * Although no lyp was passed in, another thread might have
4850 * allocated one. If one is found, just increment its ref
4851 * count and return it.
4852 */
4853 lyp = nfscl_findlayout(clp, fhp, fhlen);
4854 if (lyp == NULL) {
4855 lyp = tlyp;
4856 tlyp = NULL;
4857 lyp->nfsly_stateid.seqid = stateidp->seqid;
4858 lyp->nfsly_stateid.other[0] = stateidp->other[0];
4859 lyp->nfsly_stateid.other[1] = stateidp->other[1];
4860 lyp->nfsly_stateid.other[2] = stateidp->other[2];
4861 lyp->nfsly_lastbyte = 0;
4862 LIST_INIT(&lyp->nfsly_flayread);
4863 LIST_INIT(&lyp->nfsly_flayrw);
4864 LIST_INIT(&lyp->nfsly_recall);
4865 lyp->nfsly_filesid[0] = np->n_vattr.na_filesid[0];
4866 lyp->nfsly_filesid[1] = np->n_vattr.na_filesid[1];
4867 lyp->nfsly_clp = clp;
4868 if (layouttype == NFSLAYOUT_FLEXFILE)
4869 lyp->nfsly_flags = NFSLY_FLEXFILE;
4870 else
4871 lyp->nfsly_flags = NFSLY_FILES;
4872 if (retonclose != 0)
4873 lyp->nfsly_flags |= NFSLY_RETONCLOSE;
4874 lyp->nfsly_fhlen = fhlen;
4875 NFSBCOPY(fhp, lyp->nfsly_fh, fhlen);
4876 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
4877 LIST_INSERT_HEAD(NFSCLLAYOUTHASH(clp, fhp, fhlen), lyp,
4878 nfsly_hash);
4879 lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
4880 nfscl_layoutcnt++;
4881 } else {
4882 if (retonclose != 0)
4883 lyp->nfsly_flags |= NFSLY_RETONCLOSE;
4884 TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
4885 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
4886 lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
4887 }
4888 nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
4889 if (NFSCL_FORCEDISM(mp)) {
4890 NFSUNLOCKCLSTATE();
4891 if (tlyp != NULL)
4892 free(tlyp, M_NFSLAYOUT);
4893 return (EPERM);
4894 }
4895 *lypp = lyp;
4896 } else
4897 lyp->nfsly_stateid.seqid = stateidp->seqid;
4898
4899 /* Merge the new list of File Layouts into the list. */
4900 flp = LIST_FIRST(fhlp);
4901 if (flp != NULL) {
4902 if (flp->nfsfl_iomode == NFSLAYOUTIOMODE_READ)
4903 nfscl_mergeflayouts(&lyp->nfsly_flayread, fhlp);
4904 else
4905 nfscl_mergeflayouts(&lyp->nfsly_flayrw, fhlp);
4906 }
4907 if (layout_passed_in != 0)
4908 nfsv4_unlock(&lyp->nfsly_lock, 1);
4909 NFSUNLOCKCLSTATE();
4910 if (tlyp != NULL)
4911 free(tlyp, M_NFSLAYOUT);
4912 return (0);
4913 }
4914
4915 /*
4916 * Search for a layout by MDS file handle.
4917 * If one is found, it is returned with a refcnt (shared lock) when
4918 * *retflpp is set non-NULL, or exclusively locked when *retflpp is
4919 * returned NULL.
4920 */
4921 struct nfscllayout *
4922 nfscl_getlayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen,
4923 uint64_t off, struct nfsclflayout **retflpp, int *recalledp)
4924 {
4925 struct nfscllayout *lyp;
4926 mount_t mp;
4927 int error, igotlock;
4928
4929 mp = clp->nfsc_nmp->nm_mountp;
4930 *recalledp = 0;
4931 *retflpp = NULL;
4932 NFSLOCKCLSTATE();
4933 lyp = nfscl_findlayout(clp, fhp, fhlen);
4934 if (lyp != NULL) {
4935 if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
4936 TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
4937 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
4938 lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
4939 error = nfscl_findlayoutforio(lyp, off,
4940 NFSV4OPEN_ACCESSREAD, retflpp);
4941 if (error == 0)
4942 nfsv4_getref(&lyp->nfsly_lock, NULL,
4943 NFSCLSTATEMUTEXPTR, mp);
4944 else {
4945 do {
4946 igotlock = nfsv4_lock(&lyp->nfsly_lock,
4947 1, NULL, NFSCLSTATEMUTEXPTR, mp);
4948 } while (igotlock == 0 && !NFSCL_FORCEDISM(mp));
4949 *retflpp = NULL;
4950 }
4951 if (NFSCL_FORCEDISM(mp)) {
4952 lyp = NULL;
4953 *recalledp = 1;
4954 }
4955 } else {
4956 lyp = NULL;
4957 *recalledp = 1;
4958 }
4959 }
4960 NFSUNLOCKCLSTATE();
4961 return (lyp);
4962 }
4963
4964 /*
4965 * Search for a layout by MDS file handle. If one is found, mark it to be
4966 * recalled, if it is already marked "return on close".
4967 */
4968 static void
4969 nfscl_retoncloselayout(vnode_t vp, struct nfsclclient *clp, uint8_t *fhp,
4970 int fhlen, struct nfsclrecalllayout **recallpp)
4971 {
4972 struct nfscllayout *lyp;
4973 uint32_t iomode;
4974
4975 if (vp->v_type != VREG || !NFSHASPNFS(VFSTONFS(vp->v_mount)) ||
4976 nfscl_enablecallb == 0 || nfs_numnfscbd == 0 ||
4977 (VTONFS(vp)->n_flag & NNOLAYOUT) != 0)
4978 return;
4979 lyp = nfscl_findlayout(clp, fhp, fhlen);
4980 if (lyp != NULL && (lyp->nfsly_flags & (NFSLY_RETONCLOSE |
4981 NFSLY_RECALL)) == NFSLY_RETONCLOSE) {
4982 iomode = 0;
4983 if (!LIST_EMPTY(&lyp->nfsly_flayread))
4984 iomode |= NFSLAYOUTIOMODE_READ;
4985 if (!LIST_EMPTY(&lyp->nfsly_flayrw))
4986 iomode |= NFSLAYOUTIOMODE_RW;
4987 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
4988 0, UINT64_MAX, lyp->nfsly_stateid.seqid, 0, 0, NULL,
4989 *recallpp);
4990 NFSCL_DEBUG(4, "retoncls recall iomode=%d\n", iomode);
4991 *recallpp = NULL;
4992 }
4993 }
4994
4995 /*
4996 * Mark the layout to be recalled, recording the error.
4997 * Also, disable the dsp from further use.
4998 */
4999 void
5000 nfscl_dserr(uint32_t op, uint32_t stat, struct nfscldevinfo *dp,
5001 struct nfscllayout *lyp, struct nfsclds *dsp)
5002 {
5003 struct nfsclrecalllayout *recallp;
5004 uint32_t iomode;
5005
5006 printf("DS being disabled, error=%d\n", stat);
5007 /* Set up the return of the layout. */
5008 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
5009 iomode = 0;
5010 NFSLOCKCLSTATE();
5011 if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
5012 if (!LIST_EMPTY(&lyp->nfsly_flayread))
5013 iomode |= NFSLAYOUTIOMODE_READ;
5014 if (!LIST_EMPTY(&lyp->nfsly_flayrw))
5015 iomode |= NFSLAYOUTIOMODE_RW;
5016 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
5017 0, UINT64_MAX, lyp->nfsly_stateid.seqid, stat, op,
5018 dp->nfsdi_deviceid, recallp);
5019 NFSUNLOCKCLSTATE();
5020 NFSCL_DEBUG(4, "nfscl_dserr recall iomode=%d\n", iomode);
5021 } else {
5022 NFSUNLOCKCLSTATE();
5023 free(recallp, M_NFSLAYRECALL);
5024 }
5025
5026 /* And shut the TCP connection down. */
5027 nfscl_cancelreqs(dsp);
5028 }
5029
5030 /*
5031 * Cancel all RPCs for this "dsp" by closing the connection.
5032 * Also, mark the session as defunct.
5033 * If NFSCLDS_SAMECONN is set, the connection is shared with other DSs and
5034 * cannot be shut down.
5035 */
5036 void
5037 nfscl_cancelreqs(struct nfsclds *dsp)
5038 {
5039 struct __rpc_client *cl;
5040 static int non_event;
5041
5042 NFSLOCKDS(dsp);
5043 if ((dsp->nfsclds_flags & (NFSCLDS_CLOSED | NFSCLDS_SAMECONN)) == 0 &&
5044 dsp->nfsclds_sockp != NULL &&
5045 dsp->nfsclds_sockp->nr_client != NULL) {
5046 dsp->nfsclds_flags |= NFSCLDS_CLOSED;
5047 cl = dsp->nfsclds_sockp->nr_client;
5048 dsp->nfsclds_sess.nfsess_defunct = 1;
5049 NFSUNLOCKDS(dsp);
5050 CLNT_CLOSE(cl);
5051 /*
5052 * This 1sec sleep is done to reduce the number of reconnect
5053 * attempts made on the DS while it has failed.
5054 */
5055 tsleep(&non_event, PVFS, "ndscls", hz);
5056 return;
5057 }
5058 NFSUNLOCKDS(dsp);
5059 }
5060
5061 /*
5062 * Dereference a layout.
5063 */
5064 void
5065 nfscl_rellayout(struct nfscllayout *lyp, int exclocked)
5066 {
5067
5068 NFSLOCKCLSTATE();
5069 if (exclocked != 0)
5070 nfsv4_unlock(&lyp->nfsly_lock, 0);
5071 else
5072 nfsv4_relref(&lyp->nfsly_lock);
5073 NFSUNLOCKCLSTATE();
5074 }
5075
5076 /*
5077 * Search for a devinfo by deviceid. If one is found, return it after
5078 * acquiring a reference count on it.
5079 */
5080 struct nfscldevinfo *
5081 nfscl_getdevinfo(struct nfsclclient *clp, uint8_t *deviceid,
5082 struct nfscldevinfo *dip)
5083 {
5084
5085 NFSLOCKCLSTATE();
5086 if (dip == NULL)
5087 dip = nfscl_finddevinfo(clp, deviceid);
5088 if (dip != NULL)
5089 dip->nfsdi_refcnt++;
5090 NFSUNLOCKCLSTATE();
5091 return (dip);
5092 }
5093
5094 /*
5095 * Dereference a devinfo structure.
5096 */
5097 static void
5098 nfscl_reldevinfo_locked(struct nfscldevinfo *dip)
5099 {
5100
5101 dip->nfsdi_refcnt--;
5102 if (dip->nfsdi_refcnt == 0)
5103 wakeup(&dip->nfsdi_refcnt);
5104 }
5105
5106 /*
5107 * Dereference a devinfo structure.
5108 */
5109 void
5110 nfscl_reldevinfo(struct nfscldevinfo *dip)
5111 {
5112
5113 NFSLOCKCLSTATE();
5114 nfscl_reldevinfo_locked(dip);
5115 NFSUNLOCKCLSTATE();
5116 }
5117
5118 /*
5119 * Find a layout for this file handle. Return NULL upon failure.
5120 */
5121 static struct nfscllayout *
5122 nfscl_findlayout(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
5123 {
5124 struct nfscllayout *lyp;
5125
5126 LIST_FOREACH(lyp, NFSCLLAYOUTHASH(clp, fhp, fhlen), nfsly_hash)
5127 if (lyp->nfsly_fhlen == fhlen &&
5128 !NFSBCMP(lyp->nfsly_fh, fhp, fhlen))
5129 break;
5130 return (lyp);
5131 }
5132
5133 /*
5134 * Find a devinfo for this deviceid. Return NULL upon failure.
5135 */
5136 static struct nfscldevinfo *
5137 nfscl_finddevinfo(struct nfsclclient *clp, uint8_t *deviceid)
5138 {
5139 struct nfscldevinfo *dip;
5140
5141 LIST_FOREACH(dip, &clp->nfsc_devinfo, nfsdi_list)
5142 if (NFSBCMP(dip->nfsdi_deviceid, deviceid, NFSX_V4DEVICEID)
5143 == 0)
5144 break;
5145 return (dip);
5146 }
5147
5148 /*
5149 * Merge the new file layout list into the main one, maintaining it in
5150 * increasing offset order.
5151 */
5152 static void
5153 nfscl_mergeflayouts(struct nfsclflayouthead *fhlp,
5154 struct nfsclflayouthead *newfhlp)
5155 {
5156 struct nfsclflayout *flp, *nflp, *prevflp, *tflp;
5157
5158 flp = LIST_FIRST(fhlp);
5159 prevflp = NULL;
5160 LIST_FOREACH_SAFE(nflp, newfhlp, nfsfl_list, tflp) {
5161 while (flp != NULL && flp->nfsfl_off < nflp->nfsfl_off) {
5162 prevflp = flp;
5163 flp = LIST_NEXT(flp, nfsfl_list);
5164 }
5165 if (prevflp == NULL)
5166 LIST_INSERT_HEAD(fhlp, nflp, nfsfl_list);
5167 else
5168 LIST_INSERT_AFTER(prevflp, nflp, nfsfl_list);
5169 prevflp = nflp;
5170 }
5171 }
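
/*
 * For example (illustrative offsets), merging new segments at offsets
 * {50, 250} into an existing list at {0, 100, 200} yields
 * {0, 50, 100, 200, 250}. The entries are re-linked rather than
 * copied, so the caller must treat newfhlp as empty afterwards.
 */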
5172
5173 /*
5174 * Add this nfscldevinfo to the client, if it doesn't already exist.
5175 * This function consumes the structure pointed at by dip, if not NULL.
5176 */
5177 int
5178 nfscl_adddevinfo(struct nfsmount *nmp, struct nfscldevinfo *dip, int ind,
5179 struct nfsclflayout *flp)
5180 {
5181 struct nfsclclient *clp;
5182 struct nfscldevinfo *tdip;
5183 uint8_t *dev;
5184
5185 NFSLOCKCLSTATE();
5186 clp = nmp->nm_clp;
5187 if (clp == NULL) {
5188 NFSUNLOCKCLSTATE();
5189 if (dip != NULL)
5190 free(dip, M_NFSDEVINFO);
5191 return (ENODEV);
5192 }
5193 if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
5194 dev = flp->nfsfl_dev;
5195 else
5196 dev = flp->nfsfl_ffm[ind].dev;
5197 tdip = nfscl_finddevinfo(clp, dev);
5198 if (tdip != NULL) {
5199 tdip->nfsdi_layoutrefs++;
5200 if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
5201 flp->nfsfl_devp = tdip;
5202 else
5203 flp->nfsfl_ffm[ind].devp = tdip;
5204 nfscl_reldevinfo_locked(tdip);
5205 NFSUNLOCKCLSTATE();
5206 if (dip != NULL)
5207 free(dip, M_NFSDEVINFO);
5208 return (0);
5209 }
5210 if (dip != NULL) {
5211 LIST_INSERT_HEAD(&clp->nfsc_devinfo, dip, nfsdi_list);
5212 dip->nfsdi_layoutrefs = 1;
5213 if ((flp->nfsfl_flags & NFSFL_FILE) != 0)
5214 flp->nfsfl_devp = dip;
5215 else
5216 flp->nfsfl_ffm[ind].devp = dip;
5217 }
5218 NFSUNLOCKCLSTATE();
5219 if (dip == NULL)
5220 return (ENODEV);
5221 return (0);
5222 }
5223
5224 /*
5225 * Free up a layout structure and associated file layout structure(s).
5226 */
5227 void
5228 nfscl_freelayout(struct nfscllayout *layp)
5229 {
5230 struct nfsclflayout *flp, *nflp;
5231 struct nfsclrecalllayout *rp, *nrp;
5232
5233 LIST_FOREACH_SAFE(flp, &layp->nfsly_flayread, nfsfl_list, nflp) {
5234 LIST_REMOVE(flp, nfsfl_list);
5235 nfscl_freeflayout(flp);
5236 }
5237 LIST_FOREACH_SAFE(flp, &layp->nfsly_flayrw, nfsfl_list, nflp) {
5238 LIST_REMOVE(flp, nfsfl_list);
5239 nfscl_freeflayout(flp);
5240 }
5241 LIST_FOREACH_SAFE(rp, &layp->nfsly_recall, nfsrecly_list, nrp) {
5242 LIST_REMOVE(rp, nfsrecly_list);
5243 free(rp, M_NFSLAYRECALL);
5244 }
5245 nfscl_layoutcnt--;
5246 free(layp, M_NFSLAYOUT);
5247 }
5248
5249 /*
5250 * Free up a file layout structure.
5251 */
5252 void
5253 nfscl_freeflayout(struct nfsclflayout *flp)
5254 {
5255 int i, j;
5256
5257 if ((flp->nfsfl_flags & NFSFL_FILE) != 0) {
5258 for (i = 0; i < flp->nfsfl_fhcnt; i++)
5259 free(flp->nfsfl_fh[i], M_NFSFH);
5260 if (flp->nfsfl_devp != NULL)
5261 flp->nfsfl_devp->nfsdi_layoutrefs--;
5262 }
5263 if ((flp->nfsfl_flags & NFSFL_FLEXFILE) != 0)
5264 for (i = 0; i < flp->nfsfl_mirrorcnt; i++) {
5265 for (j = 0; j < flp->nfsfl_ffm[i].fhcnt; j++)
5266 free(flp->nfsfl_ffm[i].fh[j], M_NFSFH);
5267 if (flp->nfsfl_ffm[i].devp != NULL)
5268 flp->nfsfl_ffm[i].devp->nfsdi_layoutrefs--;
5269 }
5270 free(flp, M_NFSFLAYOUT);
5271 }
5272
5273 /*
5274 * Free up a file layout devinfo structure.
5275 */
5276 void
5277 nfscl_freedevinfo(struct nfscldevinfo *dip)
5278 {
5279
5280 free(dip, M_NFSDEVINFO);
5281 }
5282
5283 /*
5284 * Mark any layouts that match as recalled.
5285 */
5286 static int
5287 nfscl_layoutrecall(int recalltype, struct nfscllayout *lyp, uint32_t iomode,
5288 uint64_t off, uint64_t len, uint32_t stateseqid, uint32_t stat, uint32_t op,
5289 char *devid, struct nfsclrecalllayout *recallp)
5290 {
5291 struct nfsclrecalllayout *rp, *orp;
5292
5293 recallp->nfsrecly_recalltype = recalltype;
5294 recallp->nfsrecly_iomode = iomode;
5295 recallp->nfsrecly_stateseqid = stateseqid;
5296 recallp->nfsrecly_off = off;
5297 recallp->nfsrecly_len = len;
5298 recallp->nfsrecly_stat = stat;
5299 recallp->nfsrecly_op = op;
5300 if (devid != NULL)
5301 NFSBCOPY(devid, recallp->nfsrecly_devid, NFSX_V4DEVICEID);
5302 /*
5303 * Order the list as file returns first, followed by fsid and "all"
5304 * returns, both in increasing stateseqid order.
5305 * Note that the seqids wrap around, so 1 is after 0xffffffff.
5306 * (I'm not sure this is correct because I find RFC5661 confusing
5307 * on this, but hopefully it will work ok.)
5308 */
5309 orp = NULL;
5310 LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
5311 orp = rp;
5312 if ((recalltype == NFSLAYOUTRETURN_FILE &&
5313 (rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE ||
5314 nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) ||
5315 (recalltype != NFSLAYOUTRETURN_FILE &&
5316 rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE &&
5317 nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) {
5318 LIST_INSERT_BEFORE(rp, recallp, nfsrecly_list);
5319 break;
5320 }
5321
5322 /*
5323 * Put any error return on all the file returns that will
5324 * precede this one.
5325 */
5326 if (rp->nfsrecly_recalltype == NFSLAYOUTRETURN_FILE &&
5327 stat != 0 && rp->nfsrecly_stat == 0) {
5328 rp->nfsrecly_stat = stat;
5329 rp->nfsrecly_op = op;
5330 if (devid != NULL)
5331 NFSBCOPY(devid, rp->nfsrecly_devid,
5332 NFSX_V4DEVICEID);
5333 }
5334 }
5335 if (rp == NULL) {
5336 if (orp == NULL)
5337 LIST_INSERT_HEAD(&lyp->nfsly_recall, recallp,
5338 nfsrecly_list);
5339 else
5340 LIST_INSERT_AFTER(orp, recallp, nfsrecly_list);
5341 }
5342 lyp->nfsly_flags |= NFSLY_RECALL;
5343 wakeup(lyp->nfsly_clp);
5344 return (0);
5345 }
5346
5347 /*
5348 * Compare the two seqids for ordering. The trick is that the seqids can
5349 * wrap around from 0xffffffff->0, so check for the cases where one
5350 * has wrapped around.
5351 * Return 1 if seqid1 comes before seqid2, 0 otherwise.
5352 */
5353 static int
5354 nfscl_seq(uint32_t seqid1, uint32_t seqid2)
5355 {
5356
5357 if (seqid2 > seqid1 && (seqid2 - seqid1) >= 0x7fffffff)
5358 /* seqid2 has wrapped around. */
5359 return (0);
5360 if (seqid1 > seqid2 && (seqid1 - seqid2) >= 0x7fffffff)
5361 /* seqid1 has wrapped around. */
5362 return (1);
5363 if (seqid1 <= seqid2)
5364 return (1);
5365 return (0);
5366 }
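
/*
 * Worked cases (illustrative values):
 *	nfscl_seq(3, 5) == 1		3 comes before 5
 *	nfscl_seq(0xfffffffe, 1) == 1	1 is after the wrap around
 *	nfscl_seq(1, 0xfffffffe) == 0	so the reverse ordering fails
 */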
5367
5368 /*
5369 * Do a layout return for each of the recalls.
5370 */
5371 static void
5372 nfscl_layoutreturn(struct nfsmount *nmp, struct nfscllayout *lyp,
5373 struct ucred *cred, NFSPROC_T *p)
5374 {
5375 struct nfsclrecalllayout *rp;
5376 nfsv4stateid_t stateid;
5377 int layouttype;
5378
5379 NFSBCOPY(lyp->nfsly_stateid.other, stateid.other, NFSX_STATEIDOTHER);
5380 stateid.seqid = lyp->nfsly_stateid.seqid;
5381 if ((lyp->nfsly_flags & NFSLY_FILES) != 0)
5382 layouttype = NFSLAYOUT_NFSV4_1_FILES;
5383 else
5384 layouttype = NFSLAYOUT_FLEXFILE;
5385 LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
5386 (void)nfsrpc_layoutreturn(nmp, lyp->nfsly_fh,
5387 lyp->nfsly_fhlen, 0, layouttype,
5388 rp->nfsrecly_iomode, rp->nfsrecly_recalltype,
5389 rp->nfsrecly_off, rp->nfsrecly_len,
5390 &stateid, cred, p, rp->nfsrecly_stat, rp->nfsrecly_op,
5391 rp->nfsrecly_devid);
5392 }
5393 }
5394
5395 /*
5396 * Do the layout commit for a file layout.
5397 */
5398 static void
5399 nfscl_dolayoutcommit(struct nfsmount *nmp, struct nfscllayout *lyp,
5400 struct ucred *cred, NFSPROC_T *p)
5401 {
5402 struct nfsclflayout *flp;
5403 uint64_t len;
5404 int error, layouttype;
5405
5406 if ((lyp->nfsly_flags & NFSLY_FILES) != 0)
5407 layouttype = NFSLAYOUT_NFSV4_1_FILES;
5408 else
5409 layouttype = NFSLAYOUT_FLEXFILE;
5410 LIST_FOREACH(flp, &lyp->nfsly_flayrw, nfsfl_list) {
5411 if (layouttype == NFSLAYOUT_FLEXFILE &&
5412 (flp->nfsfl_fflags & NFSFLEXFLAG_NO_LAYOUTCOMMIT) != 0) {
5413 NFSCL_DEBUG(4, "Flex file: no layoutcommit\n");
5414 /* If not supported, don't bother doing it. */
5415 NFSLOCKMNT(nmp);
5416 nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
5417 NFSUNLOCKMNT(nmp);
5418 break;
5419 } else if (flp->nfsfl_off <= lyp->nfsly_lastbyte) {
5420 len = flp->nfsfl_end - flp->nfsfl_off;
5421 error = nfsrpc_layoutcommit(nmp, lyp->nfsly_fh,
5422 lyp->nfsly_fhlen, 0, flp->nfsfl_off, len,
5423 lyp->nfsly_lastbyte, &lyp->nfsly_stateid,
5424 layouttype, cred, p, NULL);
5425 NFSCL_DEBUG(4, "layoutcommit err=%d\n", error);
5426 if (error == NFSERR_NOTSUPP) {
5427 /* If not supported, don't bother doing it. */
5428 NFSLOCKMNT(nmp);
5429 nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
5430 NFSUNLOCKMNT(nmp);
5431 break;
5432 }
5433 }
5434 }
5435 }
5436
5437 /*
5438 * Commit all layouts for a file (vnode).
5439 */
5440 int
5441 nfscl_layoutcommit(vnode_t vp, NFSPROC_T *p)
5442 {
5443 struct nfsclclient *clp;
5444 struct nfscllayout *lyp;
5445 struct nfsnode *np = VTONFS(vp);
5446 mount_t mp;
5447 struct nfsmount *nmp;
5448
5449 mp = vp->v_mount;
5450 nmp = VFSTONFS(mp);
5451 if (NFSHASNOLAYOUTCOMMIT(nmp))
5452 return (0);
5453 NFSLOCKCLSTATE();
5454 clp = nmp->nm_clp;
5455 if (clp == NULL) {
5456 NFSUNLOCKCLSTATE();
5457 return (EPERM);
5458 }
5459 lyp = nfscl_findlayout(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
5460 if (lyp == NULL) {
5461 NFSUNLOCKCLSTATE();
5462 return (EPERM);
5463 }
5464 nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
5465 if (NFSCL_FORCEDISM(mp)) {
5466 NFSUNLOCKCLSTATE();
5467 return (EPERM);
5468 }
5469 tryagain:
5470 if ((lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
5471 lyp->nfsly_flags &= ~NFSLY_WRITTEN;
5472 NFSUNLOCKCLSTATE();
5473 NFSCL_DEBUG(4, "do layoutcommit2\n");
5474 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, NFSPROCCRED(p), p);
5475 NFSLOCKCLSTATE();
5476 goto tryagain;
5477 }
5478 nfsv4_relref(&lyp->nfsly_lock);
5479 NFSUNLOCKCLSTATE();
5480 return (0);
5481 }