1 /*-
2 * Copyright (c) 2009 Rick Macklem, University of Guelph
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD: releng/10.4/sys/fs/nfsclient/nfs_clstate.c 320795 2017-07-07 21:33:06Z rmacklem $");
30
31 /*
32 * These functions implement the client side state handling for NFSv4.
33 * NFSv4 state handling:
34 * - A lockowner is used to determine lock contention, so it
35 * corresponds directly to a Posix pid. (1 to 1 mapping)
36 * - The correct granularity of an OpenOwner is not nearly so
37 * obvious. An OpenOwner does the following:
38 * - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
39 * - is used to check for Open/Share contention (not applicable to
40 * this client, since all Opens are Deny_None)
 * As such, I considered both extremes.
42 * 1 OpenOwner per ClientID - Simple to manage, but fully serializes
43 * all Open, Close and Lock (with a new lockowner) Ops.
44 * 1 OpenOwner for each Open - This one results in an OpenConfirm for
45 * every Open, for most servers.
 * So, I chose to use the same mapping as I did for LockOwners.
47 * The main concern here is that you can end up with multiple Opens
48 * for the same File Handle, but on different OpenOwners (opens
49 * inherited from parents, grandparents...) and you do not know
50 * which of these the vnodeop close applies to. This is handled by
51 * delaying the Close Op(s) until all of the Opens have been closed.
52 * (It is not yet obvious if this is the correct granularity.)
53 * - How the code handles serialization:
54 * - For the ClientId, it uses an exclusive lock while getting its
55 * SetClientId and during recovery. Otherwise, it uses a shared
56 * lock via a reference count.
57 * - For the rest of the data structures, it uses an SMP mutex
58 * (once the nfs client is SMP safe) and doesn't sleep while
59 * manipulating the linked lists.
60 * - The serialization of Open/Close/Lock/LockU falls out in the
61 * "wash", since OpenOwners and LockOwners are both mapped from
62 * Posix pid. In other words, there is only one Posix pid using
63 * any given owner, so that owner is serialized. (If you change
64 * the granularity of the OpenOwner, then code must be added to
65 * serialize Ops on the OpenOwner.)
66 * - When to get rid of OpenOwners and LockOwners.
67 * - The function nfscl_cleanup_common() is executed after a process exits.
68 * It goes through the client list looking for all Open and Lock Owners.
69 * When one is found, it is marked "defunct" or in the case of
70 * an OpenOwner without any Opens, freed.
71 * The renew thread scans for defunct Owners and gets rid of them,
72 * if it can. The LockOwners will also be deleted when the
73 * associated Open is closed.
74 * - If the LockU or Close Op(s) fail during close in a way
75 * that could be recovered upon retry, they are relinked to the
76 * ClientId's defunct open list and retried by the renew thread
77 * until they succeed or an unmount/recovery occurs.
78 * (Since we are done with them, they do not need to be recovered.)
79 */
80
81 #ifndef APPLEKEXT
82 #include <fs/nfs/nfsport.h>
83
84 /*
85 * Global variables
86 */
87 extern struct nfsstats newnfsstats;
88 extern struct nfsreqhead nfsd_reqq;
89 extern u_int32_t newnfs_false, newnfs_true;
90 extern int nfscl_debuglevel;
91 extern int nfscl_enablecallb;
92 extern int nfs_numnfscbd;
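/*
 * NFSREQSPINLOCK and NFSCLSTATEMUTEX instantiate the request queue
 * spin lock and the client state mutex. The state mutex is the lock
 * acquired by NFSLOCKCLSTATE() and protects the lists below.
 */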
93 NFSREQSPINLOCK;
94 NFSCLSTATEMUTEX;
95 int nfscl_inited = 0;
96 struct nfsclhead nfsclhead; /* Head of clientid list */
97 int nfscl_deleghighwater = NFSCLDELEGHIGHWATER;
98 int nfscl_layouthighwater = NFSCLLAYOUTHIGHWATER;
99 #endif /* !APPLEKEXT */
100
101 static int nfscl_delegcnt = 0;
102 static int nfscl_layoutcnt = 0;
103 static int nfscl_getopen(struct nfsclownerhead *, u_int8_t *, int, u_int8_t *,
104 u_int8_t *, u_int32_t, struct nfscllockowner **, struct nfsclopen **);
105 static void nfscl_clrelease(struct nfsclclient *);
106 static void nfscl_cleanclient(struct nfsclclient *);
107 static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *,
108 struct ucred *, NFSPROC_T *);
109 static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *,
110 struct nfsmount *, struct ucred *, NFSPROC_T *);
111 static void nfscl_recover(struct nfsclclient *, struct ucred *, NFSPROC_T *);
112 static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *,
113 struct nfscllock *, int);
114 static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **,
115 struct nfscllock **, int);
116 static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *);
117 static u_int32_t nfscl_nextcbident(void);
118 static mount_t nfscl_getmnt(int, uint8_t *, u_int32_t, struct nfsclclient **);
119 static struct nfsclclient *nfscl_getclnt(u_int32_t);
120 static struct nfsclclient *nfscl_getclntsess(uint8_t *);
121 static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *,
122 int);
123 static void nfscl_retoncloselayout(vnode_t, struct nfsclclient *, uint8_t *,
124 int, struct nfsclrecalllayout **);
125 static void nfscl_reldevinfo_locked(struct nfscldevinfo *);
126 static struct nfscllayout *nfscl_findlayout(struct nfsclclient *, u_int8_t *,
127 int);
128 static struct nfscldevinfo *nfscl_finddevinfo(struct nfsclclient *, uint8_t *);
129 static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *,
130 u_int8_t *, struct nfscllock **);
131 static void nfscl_freealllocks(struct nfscllockownerhead *, int);
132 static int nfscl_localconflict(struct nfsclclient *, u_int8_t *, int,
133 struct nfscllock *, u_int8_t *, struct nfscldeleg *, struct nfscllock **);
134 static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *,
135 struct nfsclowner **, struct nfsclowner **, struct nfsclopen **,
136 struct nfsclopen **, u_int8_t *, u_int8_t *, int, struct ucred *, int *);
137 static int nfscl_moveopen(vnode_t , struct nfsclclient *,
138 struct nfsmount *, struct nfsclopen *, struct nfsclowner *,
139 struct nfscldeleg *, struct ucred *, NFSPROC_T *);
140 static void nfscl_totalrecall(struct nfsclclient *);
141 static int nfscl_relock(vnode_t , struct nfsclclient *, struct nfsmount *,
142 struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *);
143 static int nfscl_tryopen(struct nfsmount *, vnode_t , u_int8_t *, int,
144 u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int,
145 struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *);
146 static int nfscl_trylock(struct nfsmount *, vnode_t , u_int8_t *,
147 int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short,
148 struct ucred *, NFSPROC_T *);
149 static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
150 struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *);
151 static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *);
152 static int nfscl_errmap(struct nfsrv_descript *, u_int32_t);
153 static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
154 static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
155 struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int);
156 static void nfscl_freeopenowner(struct nfsclowner *, int);
157 static void nfscl_cleandeleg(struct nfscldeleg *);
158 static int nfscl_trydelegreturn(struct nfscldeleg *, struct ucred *,
159 struct nfsmount *, NFSPROC_T *);
160 static void nfscl_emptylockowner(struct nfscllockowner *,
161 struct nfscllockownerfhhead *);
162 static void nfscl_mergeflayouts(struct nfsclflayouthead *,
163 struct nfsclflayouthead *);
164 static int nfscl_layoutrecall(int, struct nfscllayout *, uint32_t, uint64_t,
165 uint64_t, uint32_t, struct nfsclrecalllayout *);
166 static int nfscl_seq(uint32_t, uint32_t);
167 static void nfscl_layoutreturn(struct nfsmount *, struct nfscllayout *,
168 struct ucred *, NFSPROC_T *);
169 static void nfscl_dolayoutcommit(struct nfsmount *, struct nfscllayout *,
170 struct ucred *, NFSPROC_T *);
171
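/*
 * Lists of the errors that the callback service can return for each
 * callback operation, each list terminated by a 0 entry.
 * nfscl_cberrmap[] is indexed by callback operation number and is
 * used by nfscl_errmap() to check/map a reply's error value.
 */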
172 static short nfscberr_null[] = {
173 0,
174 0,
175 };
176
177 static short nfscberr_getattr[] = {
178 NFSERR_RESOURCE,
179 NFSERR_BADHANDLE,
180 NFSERR_BADXDR,
181 NFSERR_RESOURCE,
182 NFSERR_SERVERFAULT,
183 0,
184 };
185
186 static short nfscberr_recall[] = {
187 NFSERR_RESOURCE,
188 NFSERR_BADHANDLE,
189 NFSERR_BADSTATEID,
190 NFSERR_BADXDR,
191 NFSERR_RESOURCE,
192 NFSERR_SERVERFAULT,
193 0,
194 };
195
196 static short *nfscl_cberrmap[] = {
197 nfscberr_null,
198 nfscberr_null,
199 nfscberr_null,
200 nfscberr_getattr,
201 nfscberr_recall
202 };
203
204 #define NETFAMILY(clp) \
205 (((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET)
206
207 /*
208 * Called for an open operation.
209 * If the nfhp argument is NULL, just get an openowner.
210 */
211 APPLESTATIC int
212 nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg,
213 struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp,
214 struct nfsclopen **opp, int *newonep, int *retp, int lockit)
215 {
216 struct nfsclclient *clp;
217 struct nfsclowner *owp, *nowp;
218 struct nfsclopen *op = NULL, *nop = NULL;
219 struct nfscldeleg *dp;
220 struct nfsclownerhead *ohp;
221 u_int8_t own[NFSV4CL_LOCKNAMELEN];
222 int ret;
223
224 if (newonep != NULL)
225 *newonep = 0;
226 if (opp != NULL)
227 *opp = NULL;
228 if (owpp != NULL)
229 *owpp = NULL;
230
231 /*
232 * Might need one or both of these, so MALLOC them now, to
233 * avoid a tsleep() in MALLOC later.
234 */
235 MALLOC(nowp, struct nfsclowner *, sizeof (struct nfsclowner),
236 M_NFSCLOWNER, M_WAITOK);
237 if (nfhp != NULL)
238 MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
239 fhlen - 1, M_NFSCLOPEN, M_WAITOK);
240 ret = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
241 if (ret != 0) {
242 FREE((caddr_t)nowp, M_NFSCLOWNER);
243 if (nop != NULL)
244 FREE((caddr_t)nop, M_NFSCLOPEN);
245 return (ret);
246 }
247
248 /*
	 * Get the Open iff it already exists.
	 * If none is found, add a new one, iff a new open structure was
	 * pre-allocated above (i.e. nfhp != NULL).
252 */
253 NFSLOCKCLSTATE();
254 dp = NULL;
255 /* First check the delegation list */
256 if (nfhp != NULL && usedeleg) {
257 LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
258 if (dp->nfsdl_fhlen == fhlen &&
259 !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
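				/*
				 * Found a delegation for the file. Only
				 * use it if it grants the access needed;
				 * a Read delegation cannot back a Write
				 * open.
				 */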
260 if (!(amode & NFSV4OPEN_ACCESSWRITE) ||
261 (dp->nfsdl_flags & NFSCLDL_WRITE))
262 break;
263 dp = NULL;
264 break;
265 }
266 }
267 }
268
269 if (dp != NULL) {
270 nfscl_filllockowner(p->td_proc, own, F_POSIX);
271 ohp = &dp->nfsdl_owner;
272 } else {
273 /* For NFSv4.1 and this option, use a single open_owner. */
274 if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp))))
275 nfscl_filllockowner(NULL, own, F_POSIX);
276 else
277 nfscl_filllockowner(p->td_proc, own, F_POSIX);
278 ohp = &clp->nfsc_owner;
279 }
280 /* Now, search for an openowner */
281 LIST_FOREACH(owp, ohp, nfsow_list) {
282 if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN))
283 break;
284 }
285
286 /*
287 * Create a new open, as required.
288 */
289 nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen,
290 cred, newonep);
291
292 /*
293 * Now, check the mode on the open and return the appropriate
294 * value.
295 */
296 if (retp != NULL) {
297 if (nfhp != NULL && dp != NULL && nop == NULL)
298 /* new local open on delegation */
299 *retp = NFSCLOPEN_SETCRED;
300 else
301 *retp = NFSCLOPEN_OK;
302 }
303 if (op != NULL && (amode & ~(op->nfso_mode))) {
304 op->nfso_mode |= amode;
305 if (retp != NULL && dp == NULL)
306 *retp = NFSCLOPEN_DOOPEN;
307 }
308
309 /*
310 * Serialize modifications to the open owner for multiple threads
311 * within the same process using a read/write sleep lock.
312 * For NFSv4.1 and a single OpenOwner, allow concurrent open operations
313 * by acquiring a shared lock. The close operations still use an
314 * exclusive lock for this case.
315 */
316 if (lockit != 0) {
317 if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp)))) {
318 /*
319 * Get a shared lock on the OpenOwner, but first
320 * wait for any pending exclusive lock, so that the
321 * exclusive locker gets priority.
322 */
323 nfsv4_lock(&owp->nfsow_rwlock, 0, NULL,
324 NFSCLSTATEMUTEXPTR, NULL);
325 nfsv4_getref(&owp->nfsow_rwlock, NULL,
326 NFSCLSTATEMUTEXPTR, NULL);
327 } else
328 nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR);
329 }
330 NFSUNLOCKCLSTATE();
331 if (nowp != NULL)
332 FREE((caddr_t)nowp, M_NFSCLOWNER);
333 if (nop != NULL)
334 FREE((caddr_t)nop, M_NFSCLOPEN);
335 if (owpp != NULL)
336 *owpp = owp;
337 if (opp != NULL)
338 *opp = op;
339 return (0);
340 }
341
342 /*
343 * Create a new open, as required.
344 */
345 static void
346 nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
347 struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp,
348 struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen,
349 struct ucred *cred, int *newonep)
350 {
351 struct nfsclowner *owp = *owpp, *nowp;
352 struct nfsclopen *op, *nop;
353
354 if (nowpp != NULL)
355 nowp = *nowpp;
356 else
357 nowp = NULL;
358 if (nopp != NULL)
359 nop = *nopp;
360 else
361 nop = NULL;
362 if (owp == NULL && nowp != NULL) {
363 NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
364 LIST_INIT(&nowp->nfsow_open);
365 nowp->nfsow_clp = clp;
366 nowp->nfsow_seqid = 0;
367 nowp->nfsow_defunct = 0;
368 nfscl_lockinit(&nowp->nfsow_rwlock);
369 if (dp != NULL) {
370 newnfsstats.cllocalopenowners++;
371 LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
372 } else {
373 newnfsstats.clopenowners++;
374 LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
375 }
376 owp = *owpp = nowp;
377 *nowpp = NULL;
378 if (newonep != NULL)
379 *newonep = 1;
380 }
381
382 /* If an fhp has been specified, create an Open as well. */
383 if (fhp != NULL) {
384 /* and look for the correct open, based upon FH */
385 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
386 if (op->nfso_fhlen == fhlen &&
387 !NFSBCMP(op->nfso_fh, fhp, fhlen))
388 break;
389 }
390 if (op == NULL && nop != NULL) {
391 nop->nfso_own = owp;
392 nop->nfso_mode = 0;
393 nop->nfso_opencnt = 0;
394 nop->nfso_posixlock = 1;
395 nop->nfso_fhlen = fhlen;
396 NFSBCOPY(fhp, nop->nfso_fh, fhlen);
397 LIST_INIT(&nop->nfso_lock);
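			/*
			 * Start with the special all zeros stateid. The
			 * actual stateid is filled in later, once the
			 * server assigns one.
			 */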
398 nop->nfso_stateid.seqid = 0;
399 nop->nfso_stateid.other[0] = 0;
400 nop->nfso_stateid.other[1] = 0;
401 nop->nfso_stateid.other[2] = 0;
402 KASSERT(cred != NULL, ("%s: cred NULL\n", __func__));
403 newnfs_copyincred(cred, &nop->nfso_cred);
404 if (dp != NULL) {
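				/*
				 * This is a local open against a delegation,
				 * so mark the delegation recently used by
				 * moving it to the head of the LRU list and
				 * pushing its timestamp 120 seconds ahead.
				 */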
405 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
406 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
407 nfsdl_list);
408 dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
409 newnfsstats.cllocalopens++;
410 } else {
411 newnfsstats.clopens++;
412 }
413 LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
414 *opp = nop;
415 *nopp = NULL;
416 if (newonep != NULL)
417 *newonep = 1;
418 } else {
419 *opp = op;
420 }
421 }
422 }
423
424 /*
425 * Called to find/add a delegation to a client.
426 */
427 APPLESTATIC int
428 nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
429 int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp)
430 {
431 struct nfscldeleg *dp = *dpp, *tdp;
432
433 /*
434 * First, if we have received a Read delegation for a file on a
435 * read/write file system, just return it, because they aren't
436 * useful, imho.
437 */
438 if (mp != NULL && dp != NULL && !NFSMNT_RDONLY(mp) &&
439 (dp->nfsdl_flags & NFSCLDL_READ)) {
440 (void) nfscl_trydelegreturn(dp, cred, VFSTONFS(mp), p);
441 FREE((caddr_t)dp, M_NFSCLDELEG);
442 *dpp = NULL;
443 return (0);
444 }
445
446 /* Look for the correct deleg, based upon FH */
447 NFSLOCKCLSTATE();
448 tdp = nfscl_finddeleg(clp, nfhp, fhlen);
449 if (tdp == NULL) {
450 if (dp == NULL) {
451 NFSUNLOCKCLSTATE();
452 return (NFSERR_BADSTATEID);
453 }
454 *dpp = NULL;
455 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
456 LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
457 nfsdl_hash);
458 dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
459 newnfsstats.cldelegates++;
460 nfscl_delegcnt++;
461 } else {
462 /*
		 * A delegation already exists; any new one passed in
		 * is a duplicate.
464 */
465 if (dp != NULL) {
466 printf("Deleg already exists!\n");
467 FREE((caddr_t)dp, M_NFSCLDELEG);
468 *dpp = NULL;
469 } else {
470 *dpp = tdp;
471 }
472 }
473 NFSUNLOCKCLSTATE();
474 return (0);
475 }
476
477 /*
478 * Find a delegation for this file handle. Return NULL upon failure.
479 */
480 static struct nfscldeleg *
481 nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
482 {
483 struct nfscldeleg *dp;
484
485 LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) {
486 if (dp->nfsdl_fhlen == fhlen &&
487 !NFSBCMP(dp->nfsdl_fh, fhp, fhlen))
488 break;
489 }
490 return (dp);
491 }
492
493 /*
494 * Get a stateid for an I/O operation. First, look for an open and iff
495 * found, return either a lockowner stateid or the open stateid.
496 * If no Open is found, just return error and the special stateid of all zeros.
497 */
498 APPLESTATIC int
499 nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode,
500 int fords, struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp,
501 void **lckpp)
502 {
503 struct nfsclclient *clp;
504 struct nfsclowner *owp;
505 struct nfsclopen *op = NULL, *top;
506 struct nfscllockowner *lp;
507 struct nfscldeleg *dp;
508 struct nfsnode *np;
509 struct nfsmount *nmp;
510 u_int8_t own[NFSV4CL_LOCKNAMELEN];
511 int error, done;
512
513 *lckpp = NULL;
514 /*
515 * Initially, just set the special stateid of all zeros.
516 * (Don't do this for a DS, since the special stateid can't be used.)
517 */
518 if (fords == 0) {
519 stateidp->seqid = 0;
520 stateidp->other[0] = 0;
521 stateidp->other[1] = 0;
522 stateidp->other[2] = 0;
523 }
524 if (vnode_vtype(vp) != VREG)
525 return (EISDIR);
526 np = VTONFS(vp);
527 nmp = VFSTONFS(vnode_mount(vp));
528 NFSLOCKCLSTATE();
529 clp = nfscl_findcl(nmp);
530 if (clp == NULL) {
531 NFSUNLOCKCLSTATE();
532 return (EACCES);
533 }
534
535 /*
536 * Wait for recovery to complete.
537 */
538 while ((clp->nfsc_flags & NFSCLFLAGS_RECVRINPROG))
539 (void) nfsmsleep(&clp->nfsc_flags, NFSCLSTATEMUTEXPTR,
540 PZERO, "nfsrecvr", NULL);
541
542 /*
543 * First, look for a delegation.
544 */
545 LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) {
546 if (dp->nfsdl_fhlen == fhlen &&
547 !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) {
548 if (!(mode & NFSV4OPEN_ACCESSWRITE) ||
549 (dp->nfsdl_flags & NFSCLDL_WRITE)) {
550 stateidp->seqid = dp->nfsdl_stateid.seqid;
551 stateidp->other[0] = dp->nfsdl_stateid.other[0];
552 stateidp->other[1] = dp->nfsdl_stateid.other[1];
553 stateidp->other[2] = dp->nfsdl_stateid.other[2];
554 if (!(np->n_flag & NDELEGRECALL)) {
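					/*
					 * Mark the delegation recently used
					 * and acquire a shared reference on
					 * its rwlock, so that it will not be
					 * recalled while this I/O is in
					 * progress. The caller releases
					 * *lckpp when the I/O is done.
					 */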
555 TAILQ_REMOVE(&clp->nfsc_deleg, dp,
556 nfsdl_list);
557 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
558 nfsdl_list);
559 dp->nfsdl_timestamp = NFSD_MONOSEC +
560 120;
561 dp->nfsdl_rwlock.nfslock_usecnt++;
562 *lckpp = (void *)&dp->nfsdl_rwlock;
563 }
564 NFSUNLOCKCLSTATE();
565 return (0);
566 }
567 break;
568 }
569 }
570
571 if (p != NULL) {
572 /*
573 * If p != NULL, we want to search the parentage tree
574 * for a matching OpenOwner and use that.
575 */
576 if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp))))
577 nfscl_filllockowner(NULL, own, F_POSIX);
578 else
579 nfscl_filllockowner(p->td_proc, own, F_POSIX);
580 lp = NULL;
581 error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, own, own,
582 mode, &lp, &op);
583 if (error == 0 && lp != NULL && fords == 0) {
584 /* Don't return a lock stateid for a DS. */
585 stateidp->seqid =
586 lp->nfsl_stateid.seqid;
587 stateidp->other[0] =
588 lp->nfsl_stateid.other[0];
589 stateidp->other[1] =
590 lp->nfsl_stateid.other[1];
591 stateidp->other[2] =
592 lp->nfsl_stateid.other[2];
593 NFSUNLOCKCLSTATE();
594 return (0);
595 }
596 }
597 if (op == NULL) {
598 /* If not found, just look for any OpenOwner that will work. */
599 top = NULL;
600 done = 0;
601 owp = LIST_FIRST(&clp->nfsc_owner);
602 while (!done && owp != NULL) {
603 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
604 if (op->nfso_fhlen == fhlen &&
605 !NFSBCMP(op->nfso_fh, nfhp, fhlen)) {
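					/*
					 * Remember the first open with write
					 * access as a fallback (top) for a
					 * read request, in case no open with
					 * the required mode bits is found.
					 */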
606 if (top == NULL && (op->nfso_mode &
607 NFSV4OPEN_ACCESSWRITE) != 0 &&
608 (mode & NFSV4OPEN_ACCESSREAD) != 0)
609 top = op;
610 if ((mode & op->nfso_mode) == mode) {
611 done = 1;
612 break;
613 }
614 }
615 }
616 if (!done)
617 owp = LIST_NEXT(owp, nfsow_list);
618 }
619 if (!done) {
620 NFSCL_DEBUG(2, "openmode top=%p\n", top);
621 if (top == NULL || NFSHASOPENMODE(nmp)) {
622 NFSUNLOCKCLSTATE();
623 return (ENOENT);
624 } else
625 op = top;
626 }
627 /*
628 * For read aheads or write behinds, use the open cred.
629 * A read ahead or write behind is indicated by p == NULL.
630 */
631 if (p == NULL)
632 newnfs_copycred(&op->nfso_cred, cred);
633 }
634
635 /*
636 * No lock stateid, so return the open stateid.
637 */
638 stateidp->seqid = op->nfso_stateid.seqid;
639 stateidp->other[0] = op->nfso_stateid.other[0];
640 stateidp->other[1] = op->nfso_stateid.other[1];
641 stateidp->other[2] = op->nfso_stateid.other[2];
642 NFSUNLOCKCLSTATE();
643 return (0);
644 }
645
646 /*
647 * Search for a matching file, mode and, optionally, lockowner.
648 */
649 static int
650 nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen,
651 u_int8_t *openown, u_int8_t *lockown, u_int32_t mode,
652 struct nfscllockowner **lpp, struct nfsclopen **opp)
653 {
654 struct nfsclowner *owp;
655 struct nfsclopen *op, *rop, *rop2;
656 struct nfscllockowner *lp;
657 int keep_looping;
658
659 if (lpp != NULL)
660 *lpp = NULL;
661 /*
662 * rop will be set to the open to be returned. There are three
663 * variants of this, all for an open of the correct file:
664 * 1 - A match of lockown.
665 * 2 - A match of the openown, when no lockown match exists.
666 * 3 - A match for any open, if no openown or lockown match exists.
667 * Looking for #2 over #3 probably isn't necessary, but since
668 * RFC3530 is vague w.r.t. the relationship between openowners and
669 * lockowners, I think this is the safer way to go.
670 */
671 rop = NULL;
672 rop2 = NULL;
673 keep_looping = 1;
674 /* Search the client list */
675 owp = LIST_FIRST(ohp);
676 while (owp != NULL && keep_looping != 0) {
677 /* and look for the correct open */
678 op = LIST_FIRST(&owp->nfsow_open);
679 while (op != NULL && keep_looping != 0) {
680 if (op->nfso_fhlen == fhlen &&
681 !NFSBCMP(op->nfso_fh, nfhp, fhlen)
682 && (op->nfso_mode & mode) == mode) {
683 if (lpp != NULL) {
684 /* Now look for a matching lockowner. */
685 LIST_FOREACH(lp, &op->nfso_lock,
686 nfsl_list) {
687 if (!NFSBCMP(lp->nfsl_owner,
688 lockown,
689 NFSV4CL_LOCKNAMELEN)) {
690 *lpp = lp;
691 rop = op;
692 keep_looping = 0;
693 break;
694 }
695 }
696 }
697 if (rop == NULL && !NFSBCMP(owp->nfsow_owner,
698 openown, NFSV4CL_LOCKNAMELEN)) {
699 rop = op;
700 if (lpp == NULL)
701 keep_looping = 0;
702 }
703 if (rop2 == NULL)
704 rop2 = op;
705 }
706 op = LIST_NEXT(op, nfso_list);
707 }
708 owp = LIST_NEXT(owp, nfsow_list);
709 }
710 if (rop == NULL)
711 rop = rop2;
712 if (rop == NULL)
713 return (EBADF);
714 *opp = rop;
715 return (0);
716 }
717
718 /*
719 * Release use of an open owner. Called when open operations are done
720 * with the open owner.
721 */
722 APPLESTATIC void
723 nfscl_ownerrelease(struct nfsmount *nmp, struct nfsclowner *owp,
724 __unused int error, __unused int candelete, int unlocked)
725 {
726
727 if (owp == NULL)
728 return;
729 NFSLOCKCLSTATE();
730 if (unlocked == 0) {
731 if (NFSHASONEOPENOWN(nmp))
732 nfsv4_relref(&owp->nfsow_rwlock);
733 else
734 nfscl_lockunlock(&owp->nfsow_rwlock);
735 }
736 nfscl_clrelease(owp->nfsow_clp);
737 NFSUNLOCKCLSTATE();
738 }
739
740 /*
741 * Release use of an open structure under an open owner.
742 */
743 APPLESTATIC void
744 nfscl_openrelease(struct nfsmount *nmp, struct nfsclopen *op, int error,
745 int candelete)
746 {
747 struct nfsclclient *clp;
748 struct nfsclowner *owp;
749
750 if (op == NULL)
751 return;
752 NFSLOCKCLSTATE();
753 owp = op->nfso_own;
754 if (NFSHASONEOPENOWN(nmp))
755 nfsv4_relref(&owp->nfsow_rwlock);
756 else
757 nfscl_lockunlock(&owp->nfsow_rwlock);
758 clp = owp->nfsow_clp;
759 if (error && candelete && op->nfso_opencnt == 0)
760 nfscl_freeopen(op, 0);
761 nfscl_clrelease(clp);
762 NFSUNLOCKCLSTATE();
763 }
764
765 /*
766 * Called to get a clientid structure. It will optionally lock the
767 * client data structures to do the SetClientId/SetClientId_confirm,
 * but will release that lock and return the clientid with a reference
769 * count on it.
770 * If the "cred" argument is NULL, a new clientid should not be created.
771 * If the "p" argument is NULL, a SetClientID/SetClientIDConfirm cannot
772 * be done.
773 * The start_renewthread argument tells nfscl_getcl() to start a renew
774 * thread if this creates a new clp.
 * It always returns *clpp with a reference count, unless returning an error.
776 */
777 APPLESTATIC int
778 nfscl_getcl(struct mount *mp, struct ucred *cred, NFSPROC_T *p,
779 int start_renewthread, struct nfsclclient **clpp)
780 {
781 struct nfsclclient *clp;
782 struct nfsclclient *newclp = NULL;
783 struct nfsmount *nmp;
784 char uuid[HOSTUUIDLEN];
785 int igotlock = 0, error, trystalecnt, clidinusedelay, i;
786 u_int16_t idlen = 0;
787
788 nmp = VFSTONFS(mp);
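	/*
	 * Build the client identifier from the mount's nm_clval (a 64bit
	 * value) plus the host UUID; when no UUID has been set, 16 random
	 * bytes are used in its place (see nfscl_fillclid()).
	 */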
789 if (cred != NULL) {
790 getcredhostuuid(cred, uuid, sizeof uuid);
791 idlen = strlen(uuid);
792 if (idlen > 0)
793 idlen += sizeof (u_int64_t);
794 else
795 idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */
796 MALLOC(newclp, struct nfsclclient *,
797 sizeof (struct nfsclclient) + idlen - 1, M_NFSCLCLIENT,
798 M_WAITOK | M_ZERO);
799 }
800 NFSLOCKCLSTATE();
801 /*
802 * If a forced dismount is already in progress, don't
803 * allocate a new clientid and get out now. For the case where
804 * clp != NULL, this is a harmless optimization.
805 */
806 if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
807 NFSUNLOCKCLSTATE();
808 if (newclp != NULL)
809 free(newclp, M_NFSCLCLIENT);
810 return (EBADF);
811 }
812 clp = nmp->nm_clp;
813 if (clp == NULL) {
814 if (newclp == NULL) {
815 NFSUNLOCKCLSTATE();
816 return (EACCES);
817 }
818 clp = newclp;
819 clp->nfsc_idlen = idlen;
820 LIST_INIT(&clp->nfsc_owner);
821 TAILQ_INIT(&clp->nfsc_deleg);
822 TAILQ_INIT(&clp->nfsc_layout);
823 LIST_INIT(&clp->nfsc_devinfo);
824 for (i = 0; i < NFSCLDELEGHASHSIZE; i++)
825 LIST_INIT(&clp->nfsc_deleghash[i]);
826 for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
827 LIST_INIT(&clp->nfsc_layouthash[i]);
828 clp->nfsc_flags = NFSCLFLAGS_INITED;
829 clp->nfsc_clientidrev = 1;
830 clp->nfsc_cbident = nfscl_nextcbident();
831 nfscl_fillclid(nmp->nm_clval, uuid, clp->nfsc_id,
832 clp->nfsc_idlen);
833 LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list);
834 nmp->nm_clp = clp;
835 clp->nfsc_nmp = nmp;
836 NFSUNLOCKCLSTATE();
837 if (start_renewthread != 0)
838 nfscl_start_renewthread(clp);
839 } else {
840 NFSUNLOCKCLSTATE();
841 if (newclp != NULL)
842 free(newclp, M_NFSCLCLIENT);
843 }
844 NFSLOCKCLSTATE();
845 while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock &&
846 (mp->mnt_kern_flag & MNTK_UNMOUNTF) == 0)
847 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
848 NFSCLSTATEMUTEXPTR, mp);
849 if (igotlock == 0) {
850 /*
851 * Call nfsv4_lock() with "iwantlock == 0" so that it will
852 * wait for a pending exclusive lock request. This gives the
853 * exclusive lock request priority over this shared lock
854 * request.
855 * An exclusive lock on nfsc_lock is used mainly for server
856 * crash recoveries.
857 */
858 nfsv4_lock(&clp->nfsc_lock, 0, NULL, NFSCLSTATEMUTEXPTR, mp);
859 nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
860 }
861 if (igotlock == 0 && (mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
862 /*
863 * Both nfsv4_lock() and nfsv4_getref() know to check
864 * for MNTK_UNMOUNTF and return without sleeping to
865 * wait for the exclusive lock to be released, since it
866 * might be held by nfscl_umount() and we need to get out
867 * now for that case and not wait until nfscl_umount()
868 * releases it.
869 */
870 NFSUNLOCKCLSTATE();
871 return (EBADF);
872 }
873 NFSUNLOCKCLSTATE();
874
875 /*
876 * If it needs a clientid, do the setclientid now.
877 */
878 if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) {
879 if (!igotlock)
880 panic("nfscl_clget");
881 if (p == NULL || cred == NULL) {
882 NFSLOCKCLSTATE();
883 nfsv4_unlock(&clp->nfsc_lock, 0);
884 NFSUNLOCKCLSTATE();
885 return (EACCES);
886 }
887 /*
888 * If RFC3530 Sec. 14.2.33 is taken literally,
889 * NFSERR_CLIDINUSE will be returned persistently for the
890 * case where a new mount of the same file system is using
891 * a different principal. In practice, NFSERR_CLIDINUSE is
892 * only returned when there is outstanding unexpired state
893 * on the clientid. As such, try for twice the lease
894 * interval, if we know what that is. Otherwise, make a
895 * wild ass guess.
896 * The case of returning NFSERR_STALECLIENTID is far less
897 * likely, but might occur if there is a significant delay
898 * between doing the SetClientID and SetClientIDConfirm Ops,
899 * such that the server throws away the clientid before
900 * receiving the SetClientIDConfirm.
901 */
902 if (clp->nfsc_renew > 0)
903 clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2;
904 else
905 clidinusedelay = 120;
906 trystalecnt = 3;
907 do {
908 error = nfsrpc_setclient(nmp, clp, 0, cred, p);
909 if (error == NFSERR_STALECLIENTID ||
910 error == NFSERR_STALEDONTRECOVER ||
911 error == NFSERR_BADSESSION ||
912 error == NFSERR_CLIDINUSE) {
913 (void) nfs_catnap(PZERO, error, "nfs_setcl");
914 }
915 } while (((error == NFSERR_STALECLIENTID ||
916 error == NFSERR_BADSESSION ||
917 error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) ||
918 (error == NFSERR_CLIDINUSE && --clidinusedelay > 0));
919 if (error) {
920 NFSLOCKCLSTATE();
921 nfsv4_unlock(&clp->nfsc_lock, 0);
922 NFSUNLOCKCLSTATE();
923 return (error);
924 }
925 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
926 }
927 if (igotlock) {
928 NFSLOCKCLSTATE();
929 nfsv4_unlock(&clp->nfsc_lock, 1);
930 NFSUNLOCKCLSTATE();
931 }
932
933 *clpp = clp;
934 return (0);
935 }
936
937 /*
938 * Get a reference to a clientid and return it, if valid.
939 */
940 APPLESTATIC struct nfsclclient *
941 nfscl_findcl(struct nfsmount *nmp)
942 {
943 struct nfsclclient *clp;
944
945 clp = nmp->nm_clp;
946 if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID))
947 return (NULL);
948 return (clp);
949 }
950
951 /*
952 * Release the clientid structure. It may be locked or reference counted.
953 */
954 static void
955 nfscl_clrelease(struct nfsclclient *clp)
956 {
957
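	/*
	 * Drop either the exclusive lock or a shared reference,
	 * whichever is held.
	 */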
958 if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
959 nfsv4_unlock(&clp->nfsc_lock, 0);
960 else
961 nfsv4_relref(&clp->nfsc_lock);
962 }
963
964 /*
965 * External call for nfscl_clrelease.
966 */
967 APPLESTATIC void
968 nfscl_clientrelease(struct nfsclclient *clp)
969 {
970
971 NFSLOCKCLSTATE();
972 if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK)
973 nfsv4_unlock(&clp->nfsc_lock, 0);
974 else
975 nfsv4_relref(&clp->nfsc_lock);
976 NFSUNLOCKCLSTATE();
977 }
978
979 /*
980 * Called when wanting to lock a byte region.
981 */
982 APPLESTATIC int
983 nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
984 short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp,
985 int recovery, void *id, int flags, u_int8_t *rownp, u_int8_t *ropenownp,
986 struct nfscllockowner **lpp, int *newonep, int *donelocallyp)
987 {
988 struct nfscllockowner *lp;
989 struct nfsclopen *op;
990 struct nfsclclient *clp;
991 struct nfscllockowner *nlp;
992 struct nfscllock *nlop, *otherlop;
993 struct nfscldeleg *dp = NULL, *ldp = NULL;
994 struct nfscllockownerhead *lhp = NULL;
995 struct nfsnode *np;
996 u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp, openown[NFSV4CL_LOCKNAMELEN];
997 u_int8_t *openownp;
998 int error = 0, ret, donelocally = 0;
999 u_int32_t mode;
1000
1001 /* For Lock Ops, the open mode doesn't matter, so use 0 to match any. */
1002 mode = 0;
1003 np = VTONFS(vp);
1004 *lpp = NULL;
1005 lp = NULL;
1006 *newonep = 0;
1007 *donelocallyp = 0;
1008
1009 /*
1010 * Might need these, so MALLOC them now, to
1011 * avoid a tsleep() in MALLOC later.
1012 */
1013 MALLOC(nlp, struct nfscllockowner *,
1014 sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK);
1015 MALLOC(otherlop, struct nfscllock *,
1016 sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1017 MALLOC(nlop, struct nfscllock *,
1018 sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1019 nlop->nfslo_type = type;
1020 nlop->nfslo_first = off;
1021 if (len == NFS64BITSSET) {
1022 nlop->nfslo_end = NFS64BITSSET;
1023 } else {
1024 nlop->nfslo_end = off + len;
1025 if (nlop->nfslo_end <= nlop->nfslo_first)
1026 error = NFSERR_INVAL;
1027 }
1028
1029 if (!error) {
1030 if (recovery)
1031 clp = rclp;
1032 else
1033 error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
1034 }
1035 if (error) {
1036 FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
1037 FREE((caddr_t)otherlop, M_NFSCLLOCK);
1038 FREE((caddr_t)nlop, M_NFSCLLOCK);
1039 return (error);
1040 }
1041
1042 op = NULL;
1043 if (recovery) {
1044 ownp = rownp;
1045 openownp = ropenownp;
1046 } else {
1047 nfscl_filllockowner(id, own, flags);
1048 ownp = own;
1049 if (NFSHASONEOPENOWN(VFSTONFS(vnode_mount(vp))))
1050 nfscl_filllockowner(NULL, openown, F_POSIX);
1051 else
1052 nfscl_filllockowner(p->td_proc, openown, F_POSIX);
1053 openownp = openown;
1054 }
1055 if (!recovery) {
1056 NFSLOCKCLSTATE();
1057 /*
1058 * First, search for a delegation. If one exists for this file,
1059 * the lock can be done locally against it, so long as there
1060 * isn't a local lock conflict.
1061 */
1062 ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
1063 np->n_fhp->nfh_len);
1064 /* Just sanity check for correct type of delegation */
1065 if (dp != NULL && ((dp->nfsdl_flags &
1066 (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) != 0 ||
1067 (type == F_WRLCK &&
1068 (dp->nfsdl_flags & NFSCLDL_WRITE) == 0)))
1069 dp = NULL;
1070 }
1071 if (dp != NULL) {
1072 /* Now, find an open and maybe a lockowner. */
1073 ret = nfscl_getopen(&dp->nfsdl_owner, np->n_fhp->nfh_fh,
1074 np->n_fhp->nfh_len, openownp, ownp, mode, NULL, &op);
1075 if (ret)
1076 ret = nfscl_getopen(&clp->nfsc_owner,
1077 np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
1078 ownp, mode, NULL, &op);
1079 if (!ret) {
1080 lhp = &dp->nfsdl_lock;
1081 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
1082 TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list);
1083 dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
1084 donelocally = 1;
1085 } else {
1086 dp = NULL;
1087 }
1088 }
1089 if (!donelocally) {
1090 /*
1091 * Get the related Open and maybe lockowner.
1092 */
1093 error = nfscl_getopen(&clp->nfsc_owner,
1094 np->n_fhp->nfh_fh, np->n_fhp->nfh_len, openownp,
1095 ownp, mode, &lp, &op);
1096 if (!error)
1097 lhp = &op->nfso_lock;
1098 }
1099 if (!error && !recovery)
1100 error = nfscl_localconflict(clp, np->n_fhp->nfh_fh,
1101 np->n_fhp->nfh_len, nlop, ownp, ldp, NULL);
1102 if (error) {
1103 if (!recovery) {
1104 nfscl_clrelease(clp);
1105 NFSUNLOCKCLSTATE();
1106 }
1107 FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
1108 FREE((caddr_t)otherlop, M_NFSCLLOCK);
1109 FREE((caddr_t)nlop, M_NFSCLLOCK);
1110 return (error);
1111 }
1112
1113 /*
1114 * Ok, see if a lockowner exists and create one, as required.
1115 */
1116 if (lp == NULL)
1117 LIST_FOREACH(lp, lhp, nfsl_list) {
1118 if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN))
1119 break;
1120 }
1121 if (lp == NULL) {
1122 NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN);
1123 if (recovery)
1124 NFSBCOPY(ropenownp, nlp->nfsl_openowner,
1125 NFSV4CL_LOCKNAMELEN);
1126 else
1127 NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner,
1128 NFSV4CL_LOCKNAMELEN);
1129 nlp->nfsl_seqid = 0;
1130 nlp->nfsl_lockflags = flags;
1131 nlp->nfsl_inprog = NULL;
1132 nfscl_lockinit(&nlp->nfsl_rwlock);
1133 LIST_INIT(&nlp->nfsl_lock);
1134 if (donelocally) {
1135 nlp->nfsl_open = NULL;
1136 newnfsstats.cllocallockowners++;
1137 } else {
1138 nlp->nfsl_open = op;
1139 newnfsstats.cllockowners++;
1140 }
1141 LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
1142 lp = nlp;
1143 nlp = NULL;
1144 *newonep = 1;
1145 }
1146
1147 /*
1148 * Now, update the byte ranges for locks.
1149 */
1150 ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally);
1151 if (!ret)
1152 donelocally = 1;
1153 if (donelocally) {
1154 *donelocallyp = 1;
1155 if (!recovery)
1156 nfscl_clrelease(clp);
1157 } else {
1158 /*
		 * Serialize modifications to the lock owner for multiple
		 * threads within the same process using a read/write lock.
1161 */
1162 if (!recovery)
1163 nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
1164 }
1165 if (!recovery)
1166 NFSUNLOCKCLSTATE();
1167
1168 if (nlp)
1169 FREE((caddr_t)nlp, M_NFSCLLOCKOWNER);
1170 if (nlop)
1171 FREE((caddr_t)nlop, M_NFSCLLOCK);
1172 if (otherlop)
1173 FREE((caddr_t)otherlop, M_NFSCLLOCK);
1174
1175 *lpp = lp;
1176 return (0);
1177 }
1178
1179 /*
1180 * Called to unlock a byte range, for LockU.
1181 */
1182 APPLESTATIC int
1183 nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
1184 __unused struct ucred *cred, NFSPROC_T *p, int callcnt,
1185 struct nfsclclient *clp, void *id, int flags,
1186 struct nfscllockowner **lpp, int *dorpcp)
1187 {
1188 struct nfscllockowner *lp;
1189 struct nfsclowner *owp;
1190 struct nfsclopen *op;
1191 struct nfscllock *nlop, *other_lop = NULL;
1192 struct nfscldeleg *dp;
1193 struct nfsnode *np;
1194 u_int8_t own[NFSV4CL_LOCKNAMELEN];
1195 int ret = 0, fnd;
1196
1197 np = VTONFS(vp);
1198 *lpp = NULL;
1199 *dorpcp = 0;
1200
1201 /*
1202 * Might need these, so MALLOC them now, to
1203 * avoid a tsleep() in MALLOC later.
1204 */
1205 MALLOC(nlop, struct nfscllock *,
1206 sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1207 nlop->nfslo_type = F_UNLCK;
1208 nlop->nfslo_first = off;
1209 if (len == NFS64BITSSET) {
1210 nlop->nfslo_end = NFS64BITSSET;
1211 } else {
1212 nlop->nfslo_end = off + len;
1213 if (nlop->nfslo_end <= nlop->nfslo_first) {
1214 FREE((caddr_t)nlop, M_NFSCLLOCK);
1215 return (NFSERR_INVAL);
1216 }
1217 }
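	/*
	 * For the first call (callcnt == 0), allocate a second copy of
	 * the unlock range, so that any local locks held under a
	 * delegation can be updated without consuming nlop.
	 */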
1218 if (callcnt == 0) {
1219 MALLOC(other_lop, struct nfscllock *,
1220 sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK);
1221 *other_lop = *nlop;
1222 }
1223 nfscl_filllockowner(id, own, flags);
1224 dp = NULL;
1225 NFSLOCKCLSTATE();
1226 if (callcnt == 0)
1227 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
1228 np->n_fhp->nfh_len);
1229
1230 /*
1231 * First, unlock any local regions on a delegation.
1232 */
1233 if (dp != NULL) {
1234 /* Look for this lockowner. */
1235 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
1236 if (!NFSBCMP(lp->nfsl_owner, own,
1237 NFSV4CL_LOCKNAMELEN))
1238 break;
1239 }
1240 if (lp != NULL)
1241 /* Use other_lop, so nlop is still available */
1242 (void)nfscl_updatelock(lp, &other_lop, NULL, 1);
1243 }
1244
1245 /*
1246 * Now, find a matching open/lockowner that hasn't already been done,
1247 * as marked by nfsl_inprog.
1248 */
1249 lp = NULL;
1250 fnd = 0;
1251 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1252 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1253 if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1254 !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1255 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1256 if (lp->nfsl_inprog == NULL &&
1257 !NFSBCMP(lp->nfsl_owner, own,
1258 NFSV4CL_LOCKNAMELEN)) {
1259 fnd = 1;
1260 break;
1261 }
1262 }
1263 if (fnd)
1264 break;
1265 }
1266 }
1267 if (fnd)
1268 break;
1269 }
1270
1271 if (lp != NULL) {
1272 ret = nfscl_updatelock(lp, &nlop, NULL, 0);
1273 if (ret)
1274 *dorpcp = 1;
1275 /*
		 * Serialize modifications to the lock owner for multiple
		 * threads within the same process using a read/write lock.
1278 */
1279 lp->nfsl_inprog = p;
1280 nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
1281 *lpp = lp;
1282 }
1283 NFSUNLOCKCLSTATE();
1284 if (nlop)
1285 FREE((caddr_t)nlop, M_NFSCLLOCK);
1286 if (other_lop)
1287 FREE((caddr_t)other_lop, M_NFSCLLOCK);
1288 return (0);
1289 }
1290
1291 /*
 * Release all lockowners marked in progress for this process and file.
1293 */
1294 APPLESTATIC void
1295 nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p,
1296 void *id, int flags)
1297 {
1298 struct nfsclowner *owp;
1299 struct nfsclopen *op;
1300 struct nfscllockowner *lp;
1301 struct nfsnode *np;
1302 u_int8_t own[NFSV4CL_LOCKNAMELEN];
1303
1304 np = VTONFS(vp);
1305 nfscl_filllockowner(id, own, flags);
1306 NFSLOCKCLSTATE();
1307 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1308 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1309 if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1310 !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1311 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1312 if (lp->nfsl_inprog == p &&
1313 !NFSBCMP(lp->nfsl_owner, own,
1314 NFSV4CL_LOCKNAMELEN)) {
1315 lp->nfsl_inprog = NULL;
1316 nfscl_lockunlock(&lp->nfsl_rwlock);
1317 }
1318 }
1319 }
1320 }
1321 }
1322 nfscl_clrelease(clp);
1323 NFSUNLOCKCLSTATE();
1324 }
1325
1326 /*
1327 * Called to find out if any bytes within the byte range specified are
1328 * write locked by the calling process. Used to determine if flushing
1329 * is required before a LockU.
1330 * If in doubt, return 1, so the flush will occur.
1331 */
1332 APPLESTATIC int
1333 nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
1334 struct ucred *cred, NFSPROC_T *p, void *id, int flags)
1335 {
1336 struct nfsclowner *owp;
1337 struct nfscllockowner *lp;
1338 struct nfsclopen *op;
1339 struct nfsclclient *clp;
1340 struct nfscllock *lop;
1341 struct nfscldeleg *dp;
1342 struct nfsnode *np;
1343 u_int64_t off, end;
1344 u_int8_t own[NFSV4CL_LOCKNAMELEN];
1345 int error = 0;
1346
1347 np = VTONFS(vp);
1348 switch (fl->l_whence) {
1349 case SEEK_SET:
1350 case SEEK_CUR:
1351 /*
1352 * Caller is responsible for adding any necessary offset
1353 * when SEEK_CUR is used.
1354 */
1355 off = fl->l_start;
1356 break;
1357 case SEEK_END:
1358 off = np->n_size + fl->l_start;
1359 break;
1360 default:
1361 return (1);
	}
1363 if (fl->l_len != 0) {
1364 end = off + fl->l_len;
1365 if (end < off)
1366 return (1);
1367 } else {
1368 end = NFS64BITSSET;
1369 }
1370
1371 error = nfscl_getcl(vnode_mount(vp), cred, p, 1, &clp);
1372 if (error)
1373 return (1);
1374 nfscl_filllockowner(id, own, flags);
1375 NFSLOCKCLSTATE();
1376
1377 /*
1378 * First check the delegation locks.
1379 */
1380 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
1381 if (dp != NULL) {
1382 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
1383 if (!NFSBCMP(lp->nfsl_owner, own,
1384 NFSV4CL_LOCKNAMELEN))
1385 break;
1386 }
1387 if (lp != NULL) {
1388 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
1389 if (lop->nfslo_first >= end)
1390 break;
1391 if (lop->nfslo_end <= off)
1392 continue;
1393 if (lop->nfslo_type == F_WRLCK) {
1394 nfscl_clrelease(clp);
1395 NFSUNLOCKCLSTATE();
1396 return (1);
1397 }
1398 }
1399 }
1400 }
1401
1402 /*
1403 * Now, check state against the server.
1404 */
1405 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
1406 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1407 if (op->nfso_fhlen == np->n_fhp->nfh_len &&
1408 !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
1409 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1410 if (!NFSBCMP(lp->nfsl_owner, own,
1411 NFSV4CL_LOCKNAMELEN))
1412 break;
1413 }
1414 if (lp != NULL) {
1415 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
1416 if (lop->nfslo_first >= end)
1417 break;
1418 if (lop->nfslo_end <= off)
1419 continue;
1420 if (lop->nfslo_type == F_WRLCK) {
1421 nfscl_clrelease(clp);
1422 NFSUNLOCKCLSTATE();
1423 return (1);
1424 }
1425 }
1426 }
1427 }
1428 }
1429 }
1430 nfscl_clrelease(clp);
1431 NFSUNLOCKCLSTATE();
1432 return (0);
1433 }
1434
1435 /*
1436 * Release a byte range lock owner structure.
1437 */
1438 APPLESTATIC void
1439 nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete)
1440 {
1441 struct nfsclclient *clp;
1442
1443 if (lp == NULL)
1444 return;
1445 NFSLOCKCLSTATE();
1446 clp = lp->nfsl_open->nfso_own->nfsow_clp;
1447 if (error != 0 && candelete &&
1448 (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0)
1449 nfscl_freelockowner(lp, 0);
1450 else
1451 nfscl_lockunlock(&lp->nfsl_rwlock);
1452 nfscl_clrelease(clp);
1453 NFSUNLOCKCLSTATE();
1454 }
1455
1456 /*
1457 * Free up an open structure and any associated byte range lock structures.
1458 */
1459 APPLESTATIC void
1460 nfscl_freeopen(struct nfsclopen *op, int local)
1461 {
1462
1463 LIST_REMOVE(op, nfso_list);
1464 nfscl_freealllocks(&op->nfso_lock, local);
1465 FREE((caddr_t)op, M_NFSCLOPEN);
1466 if (local)
1467 newnfsstats.cllocalopens--;
1468 else
1469 newnfsstats.clopens--;
1470 }
1471
1472 /*
1473 * Free up all lock owners and associated locks.
1474 */
1475 static void
1476 nfscl_freealllocks(struct nfscllockownerhead *lhp, int local)
1477 {
1478 struct nfscllockowner *lp, *nlp;
1479
1480 LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) {
1481 if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1482 panic("nfscllckw");
1483 nfscl_freelockowner(lp, local);
1484 }
1485 }
1486
1487 /*
1488 * Called for an Open when NFSERR_EXPIRED is received from the server.
 * If there are no byte range locks nor a Share Deny that would be lost,
 * try to do a fresh Open. Otherwise, free the open.
1491 */
1492 static int
1493 nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
1494 struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p)
1495 {
1496 struct nfscllockowner *lp;
1497 struct nfscldeleg *dp;
1498 int mustdelete = 0, error;
1499
1500 /*
1501 * Look for any byte range lock(s).
1502 */
1503 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
1504 if (!LIST_EMPTY(&lp->nfsl_lock)) {
1505 mustdelete = 1;
1506 break;
1507 }
1508 }
1509
1510 /*
1511 * If no byte range lock(s) nor a Share deny, try to re-open.
1512 */
1513 if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) {
1514 newnfs_copycred(&op->nfso_cred, cred);
1515 dp = NULL;
1516 error = nfsrpc_reopen(nmp, op->nfso_fh,
1517 op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p);
1518 if (error) {
1519 mustdelete = 1;
1520 if (dp != NULL) {
1521 FREE((caddr_t)dp, M_NFSCLDELEG);
1522 dp = NULL;
1523 }
1524 }
1525 if (dp != NULL)
1526 nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh,
1527 op->nfso_fhlen, cred, p, &dp);
1528 }
1529
1530 /*
	 * If there was a byte range lock or Share Deny, or the re-open
	 * failed, free the open.
1532 */
1533 if (mustdelete)
1534 nfscl_freeopen(op, 0);
1535 return (mustdelete);
1536 }
1537
1538 /*
1539 * Free up an open owner structure.
1540 */
1541 static void
1542 nfscl_freeopenowner(struct nfsclowner *owp, int local)
1543 {
1544
1545 LIST_REMOVE(owp, nfsow_list);
1546 FREE((caddr_t)owp, M_NFSCLOWNER);
1547 if (local)
1548 newnfsstats.cllocalopenowners--;
1549 else
1550 newnfsstats.clopenowners--;
1551 }
1552
1553 /*
1554 * Free up a byte range lock owner structure.
1555 */
1556 APPLESTATIC void
1557 nfscl_freelockowner(struct nfscllockowner *lp, int local)
1558 {
1559 struct nfscllock *lop, *nlop;
1560
1561 LIST_REMOVE(lp, nfsl_list);
1562 LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
1563 nfscl_freelock(lop, local);
1564 }
1565 FREE((caddr_t)lp, M_NFSCLLOCKOWNER);
1566 if (local)
1567 newnfsstats.cllocallockowners--;
1568 else
1569 newnfsstats.cllockowners--;
1570 }
1571
1572 /*
1573 * Free up a byte range lock structure.
1574 */
1575 APPLESTATIC void
1576 nfscl_freelock(struct nfscllock *lop, int local)
1577 {
1578
1579 LIST_REMOVE(lop, nfslo_list);
1580 FREE((caddr_t)lop, M_NFSCLLOCK);
1581 if (local)
1582 newnfsstats.cllocallocks--;
1583 else
1584 newnfsstats.cllocks--;
1585 }
1586
1587 /*
1588 * Clean out the state related to a delegation.
1589 */
1590 static void
1591 nfscl_cleandeleg(struct nfscldeleg *dp)
1592 {
1593 struct nfsclowner *owp, *nowp;
1594 struct nfsclopen *op;
1595
1596 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
1597 op = LIST_FIRST(&owp->nfsow_open);
1598 if (op != NULL) {
1599 if (LIST_NEXT(op, nfso_list) != NULL)
1600 panic("nfscleandel");
1601 nfscl_freeopen(op, 1);
1602 }
1603 nfscl_freeopenowner(owp, 1);
1604 }
1605 nfscl_freealllocks(&dp->nfsdl_lock, 1);
1606 }
1607
1608 /*
1609 * Free a delegation.
1610 */
1611 static void
1612 nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp)
1613 {
1614
1615 TAILQ_REMOVE(hdp, dp, nfsdl_list);
1616 LIST_REMOVE(dp, nfsdl_hash);
1617 FREE((caddr_t)dp, M_NFSCLDELEG);
1618 newnfsstats.cldelegates--;
1619 nfscl_delegcnt--;
1620 }
1621
1622 /*
1623 * Free up all state related to this client structure.
1624 */
1625 static void
1626 nfscl_cleanclient(struct nfsclclient *clp)
1627 {
1628 struct nfsclowner *owp, *nowp;
1629 struct nfsclopen *op, *nop;
1630
1631 /* Now, all the OpenOwners, etc. */
1632 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1633 LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
1634 nfscl_freeopen(op, 0);
1635 }
1636 nfscl_freeopenowner(owp, 0);
1637 }
1638 }
1639
1640 /*
1641 * Called when an NFSERR_EXPIRED is received from the server.
1642 */
1643 static void
1644 nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
1645 struct ucred *cred, NFSPROC_T *p)
1646 {
1647 struct nfsclowner *owp, *nowp, *towp;
1648 struct nfsclopen *op, *nop, *top;
1649 struct nfscldeleg *dp, *ndp;
1650 int ret, printed = 0;
1651
1652 /*
1653 * First, merge locally issued Opens into the list for the server.
1654 */
1655 dp = TAILQ_FIRST(&clp->nfsc_deleg);
1656 while (dp != NULL) {
1657 ndp = TAILQ_NEXT(dp, nfsdl_list);
1658 owp = LIST_FIRST(&dp->nfsdl_owner);
1659 while (owp != NULL) {
1660 nowp = LIST_NEXT(owp, nfsow_list);
1661 op = LIST_FIRST(&owp->nfsow_open);
1662 if (op != NULL) {
1663 if (LIST_NEXT(op, nfso_list) != NULL)
1664 panic("nfsclexp");
1665 LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) {
1666 if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner,
1667 NFSV4CL_LOCKNAMELEN))
1668 break;
1669 }
1670 if (towp != NULL) {
1671 /* Merge opens in */
1672 LIST_FOREACH(top, &towp->nfsow_open, nfso_list) {
1673 if (top->nfso_fhlen == op->nfso_fhlen &&
1674 !NFSBCMP(top->nfso_fh, op->nfso_fh,
1675 op->nfso_fhlen)) {
1676 top->nfso_mode |= op->nfso_mode;
1677 top->nfso_opencnt += op->nfso_opencnt;
1678 break;
1679 }
1680 }
1681 if (top == NULL) {
1682 /* Just add the open to the owner list */
1683 LIST_REMOVE(op, nfso_list);
1684 op->nfso_own = towp;
1685 LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
1686 newnfsstats.cllocalopens--;
1687 newnfsstats.clopens++;
1688 }
1689 } else {
1690 /* Just add the openowner to the client list */
1691 LIST_REMOVE(owp, nfsow_list);
1692 owp->nfsow_clp = clp;
1693 LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
1694 newnfsstats.cllocalopenowners--;
1695 newnfsstats.clopenowners++;
1696 newnfsstats.cllocalopens--;
1697 newnfsstats.clopens++;
1698 }
1699 }
1700 owp = nowp;
1701 }
1702 if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) {
1703 printed = 1;
1704 printf("nfsv4 expired locks lost\n");
1705 }
1706 nfscl_cleandeleg(dp);
1707 nfscl_freedeleg(&clp->nfsc_deleg, dp);
1708 dp = ndp;
1709 }
1710 if (!TAILQ_EMPTY(&clp->nfsc_deleg))
1711 panic("nfsclexp");
1712
1713 /*
1714 * Now, try and reopen against the server.
1715 */
1716 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1717 owp->nfsow_seqid = 0;
1718 LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) {
1719 ret = nfscl_expireopen(clp, op, nmp, cred, p);
1720 if (ret && !printed) {
1721 printed = 1;
1722 printf("nfsv4 expired locks lost\n");
1723 }
1724 }
1725 if (LIST_EMPTY(&owp->nfsow_open))
1726 nfscl_freeopenowner(owp, 0);
1727 }
1728 }
1729
1730 /*
1731 * This function must be called after the process represented by "own" has
1732 * exited. Must be called with CLSTATE lock held.
1733 */
1734 static void
1735 nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
1736 {
1737 struct nfsclowner *owp, *nowp;
1738 struct nfscllockowner *lp, *nlp;
1739 struct nfscldeleg *dp;
1740
1741 /* First, get rid of local locks on delegations. */
1742 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1743 LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
1744 if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
1745 if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
1746 panic("nfscllckw");
1747 nfscl_freelockowner(lp, 1);
1748 }
1749 }
1750 }
1751 owp = LIST_FIRST(&clp->nfsc_owner);
1752 while (owp != NULL) {
1753 nowp = LIST_NEXT(owp, nfsow_list);
1754 if (!NFSBCMP(owp->nfsow_owner, own,
1755 NFSV4CL_LOCKNAMELEN)) {
1756 /*
1757 * If there are children that haven't closed the
1758 * file descriptors yet, the opens will still be
1759 * here. For that case, let the renew thread clear
1760 * out the OpenOwner later.
1761 */
1762 if (LIST_EMPTY(&owp->nfsow_open))
1763 nfscl_freeopenowner(owp, 0);
1764 else
1765 owp->nfsow_defunct = 1;
1766 }
1767 owp = nowp;
1768 }
1769 }
1770
1771 /*
1772 * Find open/lock owners for processes that have exited.
1773 */
1774 static void
1775 nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
1776 {
1777 struct nfsclowner *owp, *nowp;
1778 struct nfsclopen *op;
1779 struct nfscllockowner *lp, *nlp;
1780 struct nfscldeleg *dp;
1781
1782 NFSPROCLISTLOCK();
1783 NFSLOCKCLSTATE();
1784 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
1785 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
1786 LIST_FOREACH_SAFE(lp, &op->nfso_lock, nfsl_list, nlp) {
1787 if (LIST_EMPTY(&lp->nfsl_lock))
1788 nfscl_emptylockowner(lp, lhp);
1789 }
1790 }
1791 if (nfscl_procdoesntexist(owp->nfsow_owner))
1792 nfscl_cleanup_common(clp, owp->nfsow_owner);
1793 }
1794
1795 /*
1796 * For the single open_owner case, these lock owners need to be
1797 * checked to see if they still exist separately.
1798 * This is because nfscl_procdoesntexist() never returns true for
1799 * the single open_owner so that the above doesn't ever call
1800 * nfscl_cleanup_common().
1801 */
1802 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
1803 LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
1804 if (nfscl_procdoesntexist(lp->nfsl_owner))
1805 nfscl_cleanup_common(clp, lp->nfsl_owner);
1806 }
1807 }
1808 NFSUNLOCKCLSTATE();
1809 NFSPROCLISTUNLOCK();
1810 }
1811
1812 /*
1813 * Take the empty lock owner and move it to the local lhp list if the
1814 * associated process no longer exists.
1815 */
1816 static void
1817 nfscl_emptylockowner(struct nfscllockowner *lp,
1818 struct nfscllockownerfhhead *lhp)
1819 {
1820 struct nfscllockownerfh *lfhp, *mylfhp;
1821 struct nfscllockowner *nlp;
1822 int fnd_it;
1823
1824 /* If not a Posix lock owner, just return. */
1825 if ((lp->nfsl_lockflags & F_POSIX) == 0)
1826 return;
1827
1828 fnd_it = 0;
1829 mylfhp = NULL;
1830 /*
1831 * First, search to see if this lock owner is already in the list.
1832 * If it is, then the associated process no longer exists.
1833 */
1834 SLIST_FOREACH(lfhp, lhp, nfslfh_list) {
1835 if (lfhp->nfslfh_len == lp->nfsl_open->nfso_fhlen &&
1836 !NFSBCMP(lfhp->nfslfh_fh, lp->nfsl_open->nfso_fh,
1837 lfhp->nfslfh_len))
1838 mylfhp = lfhp;
1839 LIST_FOREACH(nlp, &lfhp->nfslfh_lock, nfsl_list)
1840 if (!NFSBCMP(nlp->nfsl_owner, lp->nfsl_owner,
1841 NFSV4CL_LOCKNAMELEN))
1842 fnd_it = 1;
1843 }
1844 /* If not found, check if process still exists. */
1845 if (fnd_it == 0 && nfscl_procdoesntexist(lp->nfsl_owner) == 0)
1846 return;
1847
1848 /* Move the lock owner over to the local list. */
1849 if (mylfhp == NULL) {
1850 mylfhp = malloc(sizeof(struct nfscllockownerfh), M_TEMP,
1851 M_NOWAIT);
1852 if (mylfhp == NULL)
1853 return;
1854 mylfhp->nfslfh_len = lp->nfsl_open->nfso_fhlen;
1855 NFSBCOPY(lp->nfsl_open->nfso_fh, mylfhp->nfslfh_fh,
1856 mylfhp->nfslfh_len);
1857 LIST_INIT(&mylfhp->nfslfh_lock);
1858 SLIST_INSERT_HEAD(lhp, mylfhp, nfslfh_list);
1859 }
1860 LIST_REMOVE(lp, nfsl_list);
1861 LIST_INSERT_HEAD(&mylfhp->nfslfh_lock, lp, nfsl_list);
1862 }
1863
1864 static int fake_global; /* Used to force visibility of MNTK_UNMOUNTF */
1865 /*
1866 * Called from nfs umount to free up the clientid.
1867 */
1868 APPLESTATIC void
1869 nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p)
1870 {
1871 struct nfsclclient *clp;
1872 struct ucred *cred;
1873 int igotlock;
1874
1875 /*
1876 * For the case that matters, this is the thread that set
1877 * MNTK_UNMOUNTF, so it will see it set. The code that follows is
1878 * done to ensure that any thread executing nfscl_getcl() after
1879 * this time, will see MNTK_UNMOUNTF set. nfscl_getcl() uses the
1880 * mutex for NFSLOCKCLSTATE(), so it is "m" for the following
1881 * explanation, courtesy of Alan Cox.
1882 * What follows is a snippet from Alan Cox's email at:
1883 * http://docs.FreeBSD.org/cgi/
1884 * mid.cgi?BANLkTikR3d65zPHo9==08ZfJ2vmqZucEvw
1885 *
1886 * 1. Set MNTK_UNMOUNTF
1887 * 2. Acquire a standard FreeBSD mutex "m".
1888 * 3. Update some data structures.
1889 * 4. Release mutex "m".
1890 *
	 * Then, other threads that acquire "m" after step 4 has occurred will
	 * see MNTK_UNMOUNTF as set. But, other threads that beat thread X
	 * (the one performing steps 1-4) to step 2 may or may not see
	 * MNTK_UNMOUNTF as set.
1894 */
1895 NFSLOCKCLSTATE();
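	/*
	 * If MNTK_UNMOUNTF is set, perform steps 3 and 4 above: modify
	 * fake_global while holding the mutex, then release and reacquire
	 * it, so that any thread acquiring the mutex from now on will see
	 * the flag as set.
	 */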
1896 if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
1897 fake_global++;
1898 NFSUNLOCKCLSTATE();
1899 NFSLOCKCLSTATE();
1900 }
1901
1902 clp = nmp->nm_clp;
1903 if (clp != NULL) {
1904 if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0)
1905 panic("nfscl umount");
1906
1907 /*
1908 * First, handshake with the nfscl renew thread, to terminate
1909 * it.
1910 */
1911 clp->nfsc_flags |= NFSCLFLAGS_UMOUNT;
1912 while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD)
1913 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT,
1914 "nfsclumnt", hz);
1915
1916 /*
1917 * Now, get the exclusive lock on the client state, so
1918 * that no uses of the state are still in progress.
1919 */
1920 do {
1921 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
1922 NFSCLSTATEMUTEXPTR, NULL);
1923 } while (!igotlock);
1924 NFSUNLOCKCLSTATE();
1925
1926 /*
1927 * Free up all the state. It will expire on the server, but
1928 * maybe we should do a SetClientId/SetClientIdConfirm so
1929 * the server throws it away?
1930 */
1931 LIST_REMOVE(clp, nfsc_list);
1932 nfscl_delegreturnall(clp, p);
1933 cred = newnfs_getcred();
1934 if (NFSHASNFSV4N(nmp)) {
1935 (void)nfsrpc_destroysession(nmp, clp, cred, p);
1936 (void)nfsrpc_destroyclient(nmp, clp, cred, p);
1937 } else
1938 (void)nfsrpc_setclient(nmp, clp, 0, cred, p);
1939 nfscl_cleanclient(clp);
1940 nmp->nm_clp = NULL;
1941 NFSFREECRED(cred);
1942 free(clp, M_NFSCLCLIENT);
1943 } else
1944 NFSUNLOCKCLSTATE();
1945 }
1946
1947 /*
 * This function is called when a server replies with NFSERR_STALECLIENTID,
 * NFSERR_STALESTATEID or NFSERR_BADSESSION. It traverses the clientid lists,
1950 * doing Opens and Locks with reclaim. If these fail, it deletes the
1951 * corresponding state.
1952 */
1953 static void
1954 nfscl_recover(struct nfsclclient *clp, struct ucred *cred, NFSPROC_T *p)
1955 {
1956 struct nfsclowner *owp, *nowp;
1957 struct nfsclopen *op, *nop;
1958 struct nfscllockowner *lp, *nlp;
1959 struct nfscllock *lop, *nlop;
1960 struct nfscldeleg *dp, *ndp, *tdp;
1961 struct nfsmount *nmp;
1962 struct ucred *tcred;
1963 struct nfsclopenhead extra_open;
1964 struct nfscldeleghead extra_deleg;
1965 struct nfsreq *rep;
1966 u_int64_t len;
1967 u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode;
1968 int i, igotlock = 0, error, trycnt, firstlock;
1969 struct nfscllayout *lyp, *nlyp;
1970
1971 /*
1972 * First, lock the client structure, so everyone else will
1973 * block when trying to use state.
1974 */
1975 NFSLOCKCLSTATE();
1976 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
1977 do {
1978 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
1979 NFSCLSTATEMUTEXPTR, NULL);
1980 } while (!igotlock);
1981 NFSUNLOCKCLSTATE();
1982
1983 nmp = clp->nfsc_nmp;
1984 if (nmp == NULL)
1985 panic("nfscl recover");
1986
1987 /*
1988 * For now, just get rid of all layouts. There may be a need
1989 * to do LayoutCommit Ops with reclaim == true later.
1990 */
1991 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp)
1992 nfscl_freelayout(lyp);
1993 TAILQ_INIT(&clp->nfsc_layout);
1994 for (i = 0; i < NFSCLLAYOUTHASHSIZE; i++)
1995 LIST_INIT(&clp->nfsc_layouthash[i]);
1996
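	/*
	 * Get a fresh clientid (and session for NFSv4.1), retrying a few
	 * times while the server reports transient staleness errors.
	 */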
1997 trycnt = 5;
1998 do {
1999 error = nfsrpc_setclient(nmp, clp, 1, cred, p);
2000 } while ((error == NFSERR_STALECLIENTID ||
2001 error == NFSERR_BADSESSION ||
2002 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
2003 if (error) {
2004 NFSLOCKCLSTATE();
2005 clp->nfsc_flags &= ~(NFSCLFLAGS_RECOVER |
2006 NFSCLFLAGS_RECVRINPROG);
2007 wakeup(&clp->nfsc_flags);
2008 nfsv4_unlock(&clp->nfsc_lock, 0);
2009 NFSUNLOCKCLSTATE();
2010 return;
2011 }
2012 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
2013 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2014
2015 /*
2016 * Mark requests already queued on the server, so that they don't
2017 * initiate another recovery cycle. Any requests already in the
2018 * queue that handle state information will have the old stale
	 * clientid/stateid and will get an NFSERR_STALESTATEID,
2020 * NFSERR_STALECLIENTID or NFSERR_BADSESSION reply from the server.
2021 * This will be translated to NFSERR_STALEDONTRECOVER when
2022 * R_DONTRECOVER is set.
2023 */
2024 NFSLOCKREQ();
2025 TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) {
2026 if (rep->r_nmp == nmp)
2027 rep->r_flags |= R_DONTRECOVER;
2028 }
2029 NFSUNLOCKREQ();
2030
2031 /*
2032 * Now, mark all delegations "need reclaim".
2033 */
2034 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
2035 dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM;
2036
2037 TAILQ_INIT(&extra_deleg);
2038 LIST_INIT(&extra_open);
2039 /*
2040 * Now traverse the state lists, doing Open and Lock Reclaims.
2041 */
2042 tcred = newnfs_getcred();
2043 owp = LIST_FIRST(&clp->nfsc_owner);
2044 while (owp != NULL) {
2045 nowp = LIST_NEXT(owp, nfsow_list);
2046 owp->nfsow_seqid = 0;
2047 op = LIST_FIRST(&owp->nfsow_open);
2048 while (op != NULL) {
2049 nop = LIST_NEXT(op, nfso_list);
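			/*
			 * Skip further reclaims once the server has replied
			 * NFSERR_NOGRACE or NFSERR_BADSESSION to a previous
			 * reclaim attempt.
			 */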
2050 if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
2051 /* Search for a delegation to reclaim with the open */
2052 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2053 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
2054 continue;
2055 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
2056 mode = NFSV4OPEN_ACCESSWRITE;
2057 delegtype = NFSV4OPEN_DELEGATEWRITE;
2058 } else {
2059 mode = NFSV4OPEN_ACCESSREAD;
2060 delegtype = NFSV4OPEN_DELEGATEREAD;
2061 }
2062 if ((op->nfso_mode & mode) == mode &&
2063 op->nfso_fhlen == dp->nfsdl_fhlen &&
2064 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
2065 break;
2066 }
2067 ndp = dp;
2068 if (dp == NULL)
2069 delegtype = NFSV4OPEN_DELEGATENONE;
2070 newnfs_copycred(&op->nfso_cred, tcred);
2071 error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
2072 op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
2073 op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
2074 tcred, p);
2075 if (!error) {
2076 /* Handle any replied delegation */
2077 if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
2078 || NFSMNT_RDONLY(nmp->nm_mountp))) {
2079 if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
2080 mode = NFSV4OPEN_ACCESSWRITE;
2081 else
2082 mode = NFSV4OPEN_ACCESSREAD;
2083 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2084 if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
2085 continue;
2086 if ((op->nfso_mode & mode) == mode &&
2087 op->nfso_fhlen == dp->nfsdl_fhlen &&
2088 !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
2089 op->nfso_fhlen)) {
2090 dp->nfsdl_stateid = ndp->nfsdl_stateid;
2091 dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
2092 dp->nfsdl_ace = ndp->nfsdl_ace;
2093 dp->nfsdl_change = ndp->nfsdl_change;
2094 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2095 if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
2096 dp->nfsdl_flags |= NFSCLDL_RECALL;
2097 FREE((caddr_t)ndp, M_NFSCLDELEG);
2098 ndp = NULL;
2099 break;
2100 }
2101 }
2102 }
2103 if (ndp != NULL)
2104 TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);
2105
2106 /* and reclaim all byte range locks */
2107 lp = LIST_FIRST(&op->nfso_lock);
2108 while (lp != NULL) {
2109 nlp = LIST_NEXT(lp, nfsl_list);
2110 lp->nfsl_seqid = 0;
2111 firstlock = 1;
2112 lop = LIST_FIRST(&lp->nfsl_lock);
2113 while (lop != NULL) {
2114 nlop = LIST_NEXT(lop, nfslo_list);
2115 if (lop->nfslo_end == NFS64BITSSET)
2116 len = NFS64BITSSET;
2117 else
2118 len = lop->nfslo_end - lop->nfslo_first;
2119 error = nfscl_trylock(nmp, NULL,
2120 op->nfso_fh, op->nfso_fhlen, lp,
2121 firstlock, 1, lop->nfslo_first, len,
2122 lop->nfslo_type, tcred, p);
2123 if (error != 0)
2124 nfscl_freelock(lop, 0);
2125 else
2126 firstlock = 0;
2127 lop = nlop;
2128 }
2129 /* If no locks, but a lockowner, just delete it. */
2130 if (LIST_EMPTY(&lp->nfsl_lock))
2131 nfscl_freelockowner(lp, 0);
2132 lp = nlp;
2133 }
2134 }
2135 }
2136 if (error != 0 && error != NFSERR_BADSESSION)
2137 nfscl_freeopen(op, 0);
2138 op = nop;
2139 }
2140 owp = nowp;
2141 }
2142
2143 /*
	 * Now, try to get any delegations not yet reclaimed by cobbling
	 * together an appropriate open.
2146 */
2147 nowp = NULL;
2148 dp = TAILQ_FIRST(&clp->nfsc_deleg);
2149 while (dp != NULL) {
2150 ndp = TAILQ_NEXT(dp, nfsdl_list);
2151 if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
2152 if (nowp == NULL) {
2153 MALLOC(nowp, struct nfsclowner *,
2154 sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
2155 /*
				 * The name must be as long as the largest
				 * possible NFSV4CL_LOCKNAMELEN (12 bytes for now).
2158 */
2159 NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner,
2160 NFSV4CL_LOCKNAMELEN);
2161 LIST_INIT(&nowp->nfsow_open);
2162 nowp->nfsow_clp = clp;
2163 nowp->nfsow_seqid = 0;
2164 nowp->nfsow_defunct = 0;
2165 nfscl_lockinit(&nowp->nfsow_rwlock);
2166 }
2167 nop = NULL;
2168 if (error != NFSERR_NOGRACE && error != NFSERR_BADSESSION) {
2169 MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
2170 dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
2171 nop->nfso_own = nowp;
2172 if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
2173 nop->nfso_mode = NFSV4OPEN_ACCESSWRITE;
2174 delegtype = NFSV4OPEN_DELEGATEWRITE;
2175 } else {
2176 nop->nfso_mode = NFSV4OPEN_ACCESSREAD;
2177 delegtype = NFSV4OPEN_DELEGATEREAD;
2178 }
2179 nop->nfso_opencnt = 0;
2180 nop->nfso_posixlock = 1;
2181 nop->nfso_fhlen = dp->nfsdl_fhlen;
2182 NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen);
2183 LIST_INIT(&nop->nfso_lock);
2184 nop->nfso_stateid.seqid = 0;
2185 nop->nfso_stateid.other[0] = 0;
2186 nop->nfso_stateid.other[1] = 0;
2187 nop->nfso_stateid.other[2] = 0;
2188 newnfs_copycred(&dp->nfsdl_cred, tcred);
2189 newnfs_copyincred(tcred, &nop->nfso_cred);
2190 tdp = NULL;
2191 error = nfscl_tryopen(nmp, NULL, nop->nfso_fh,
2192 nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen,
2193 nop->nfso_mode, nop, NULL, 0, &tdp, 1,
2194 delegtype, tcred, p);
2195 if (tdp != NULL) {
2196 if ((tdp->nfsdl_flags & NFSCLDL_WRITE))
2197 mode = NFSV4OPEN_ACCESSWRITE;
2198 else
2199 mode = NFSV4OPEN_ACCESSREAD;
2200 if ((nop->nfso_mode & mode) == mode &&
2201 nop->nfso_fhlen == tdp->nfsdl_fhlen &&
2202 !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh,
2203 nop->nfso_fhlen)) {
2204 dp->nfsdl_stateid = tdp->nfsdl_stateid;
2205 dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit;
2206 dp->nfsdl_ace = tdp->nfsdl_ace;
2207 dp->nfsdl_change = tdp->nfsdl_change;
2208 dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
2209 if ((tdp->nfsdl_flags & NFSCLDL_RECALL))
2210 dp->nfsdl_flags |= NFSCLDL_RECALL;
2211 FREE((caddr_t)tdp, M_NFSCLDELEG);
2212 } else {
2213 TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list);
2214 }
2215 }
2216 }
2217 if (error) {
2218 if (nop != NULL)
2219 FREE((caddr_t)nop, M_NFSCLOPEN);
2220 /*
2221 * Couldn't reclaim it, so throw the state
2222 * away. Ouch!!
2223 */
2224 nfscl_cleandeleg(dp);
2225 nfscl_freedeleg(&clp->nfsc_deleg, dp);
2226 } else {
2227 LIST_INSERT_HEAD(&extra_open, nop, nfso_list);
2228 }
2229 }
2230 dp = ndp;
2231 }
2232
2233 /*
2234 * Now, get rid of extra Opens and Delegations.
2235 */
2236 LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) {
2237 do {
2238 newnfs_copycred(&op->nfso_cred, tcred);
2239 error = nfscl_tryclose(op, tcred, nmp, p);
2240 if (error == NFSERR_GRACE)
2241 (void) nfs_catnap(PZERO, error, "nfsexcls");
2242 } while (error == NFSERR_GRACE);
2243 LIST_REMOVE(op, nfso_list);
2244 FREE((caddr_t)op, M_NFSCLOPEN);
2245 }
2246 if (nowp != NULL)
2247 FREE((caddr_t)nowp, M_NFSCLOWNER);
2248
2249 TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) {
2250 do {
2251 newnfs_copycred(&dp->nfsdl_cred, tcred);
2252 error = nfscl_trydelegreturn(dp, tcred, nmp, p);
2253 if (error == NFSERR_GRACE)
2254 (void) nfs_catnap(PZERO, error, "nfsexdlg");
2255 } while (error == NFSERR_GRACE);
2256 TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list);
2257 FREE((caddr_t)dp, M_NFSCLDELEG);
2258 }
2259
2260 /* For NFSv4.1 or later, do a RECLAIM_COMPLETE. */
2261 if (NFSHASNFSV4N(nmp))
2262 (void)nfsrpc_reclaimcomplete(nmp, cred, p);
2263
2264 NFSLOCKCLSTATE();
2265 clp->nfsc_flags &= ~NFSCLFLAGS_RECVRINPROG;
2266 wakeup(&clp->nfsc_flags);
2267 nfsv4_unlock(&clp->nfsc_lock, 0);
2268 NFSUNLOCKCLSTATE();
2269 NFSFREECRED(tcred);
2270 }
2271
2272 /*
2273 * This function is called when a server replies with NFSERR_EXPIRED.
2274 * It deletes all state for the client and does a fresh SetClientId/confirm.
2275 * XXX Someday it should post a signal to the process(es) that hold the
2276 * state, so they know that lock state has been lost.
2277 */
2278 APPLESTATIC int
2279 nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p)
2280 {
2281 struct nfsmount *nmp;
2282 struct ucred *cred;
2283 int igotlock = 0, error, trycnt;
2284
2285 /*
2286 * If the clientid has gone away or a new SetClientid has already
2287 * been done, just return ok.
2288 */
2289 if (clp == NULL || clidrev != clp->nfsc_clientidrev)
2290 return (0);
2291
2292 /*
2293 * First, lock the client structure, so everyone else will
2294 * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so
2295 * that only one thread does the work.
2296 */
2297 NFSLOCKCLSTATE();
2298 clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT;
2299 do {
2300 igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL,
2301 NFSCLSTATEMUTEXPTR, NULL);
2302 } while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT));
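	/*
	 * If another thread has already cleared NFSCLFLAGS_EXPIREIT, it
	 * has done the recovery work, so there is nothing left to do here.
	 */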
2303 if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) {
2304 if (igotlock)
2305 nfsv4_unlock(&clp->nfsc_lock, 0);
2306 NFSUNLOCKCLSTATE();
2307 return (0);
2308 }
2309 clp->nfsc_flags |= NFSCLFLAGS_RECVRINPROG;
2310 NFSUNLOCKCLSTATE();
2311
2312 nmp = clp->nfsc_nmp;
2313 if (nmp == NULL)
2314 panic("nfscl expired");
2315 cred = newnfs_getcred();
2316 trycnt = 5;
2317 do {
2318 error = nfsrpc_setclient(nmp, clp, 0, cred, p);
2319 } while ((error == NFSERR_STALECLIENTID ||
2320 error == NFSERR_BADSESSION ||
2321 error == NFSERR_STALEDONTRECOVER) && --trycnt > 0);
2322 if (error) {
2323 NFSLOCKCLSTATE();
2324 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2325 } else {
2326 /*
2327 * Expire the state for the client.
2328 */
2329 nfscl_expireclient(clp, nmp, cred, p);
2330 NFSLOCKCLSTATE();
2331 clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID;
2332 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2333 }
2334 clp->nfsc_flags &= ~(NFSCLFLAGS_EXPIREIT | NFSCLFLAGS_RECVRINPROG);
2335 wakeup(&clp->nfsc_flags);
2336 nfsv4_unlock(&clp->nfsc_lock, 0);
2337 NFSUNLOCKCLSTATE();
2338 NFSFREECRED(cred);
2339 return (error);
2340 }
2341
2342 /*
2343 * This function inserts a lock in the list after insert_lop.
2344 */
2345 static void
2346 nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop,
2347 struct nfscllock *insert_lop, int local)
2348 {
2349
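	/* An insert_lop equal to lp means "insert at the head of the list". */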
2350 if ((struct nfscllockowner *)insert_lop == lp)
2351 LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list);
2352 else
2353 LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list);
2354 if (local)
2355 newnfsstats.cllocallocks++;
2356 else
2357 newnfsstats.cllocks++;
2358 }
2359
2360 /*
2361 * This function updates the locking for a lock owner and given file. It
2362 * maintains a list of lock ranges ordered on increasing file offset that
2363 * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style).
2364 * It always adds new_lop to the list and sometimes uses the one pointed
2365 * at by other_lopp.
2366 * Returns 1 if the locks were modified, 0 otherwise.
2367 */
2368 static int
2369 nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp,
2370 struct nfscllock **other_lopp, int local)
2371 {
2372 struct nfscllock *new_lop = *new_lopp;
2373 struct nfscllock *lop, *tlop, *ilop;
2374 struct nfscllock *other_lop;
2375 int unlock = 0, modified = 0;
2376 u_int64_t tmp;
2377
2378 /*
2379 * Work down the list until the lock is merged.
2380 */
2381 if (new_lop->nfslo_type == F_UNLCK)
2382 unlock = 1;
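	/* An ilop equal to lp means "insert at the head"; see nfscl_insertlock(). */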
2383 ilop = (struct nfscllock *)lp;
2384 lop = LIST_FIRST(&lp->nfsl_lock);
2385 while (lop != NULL) {
2386 /*
		 * Only check locks for this file that aren't before the start
		 * of the new lock's range.
2389 */
2390 if (lop->nfslo_end >= new_lop->nfslo_first) {
2391 if (new_lop->nfslo_end < lop->nfslo_first) {
2392 /*
2393 * If the new lock ends before the start of the
2394 * current lock's range, no merge, just insert
2395 * the new lock.
2396 */
2397 break;
2398 }
2399 if (new_lop->nfslo_type == lop->nfslo_type ||
2400 (new_lop->nfslo_first <= lop->nfslo_first &&
2401 new_lop->nfslo_end >= lop->nfslo_end)) {
2402 /*
				 * This lock can be absorbed by the new
				 * lock/unlock. This happens when the new range
				 * completely covers the old lock's range
				 * (including for an unlock), or when the
				 * ranges overlap or are contiguous and the
				 * two locks are of the same type.
2408 */
2409 if (new_lop->nfslo_type != lop->nfslo_type ||
2410 new_lop->nfslo_first != lop->nfslo_first ||
2411 new_lop->nfslo_end != lop->nfslo_end)
2412 modified = 1;
2413 if (lop->nfslo_first < new_lop->nfslo_first)
2414 new_lop->nfslo_first = lop->nfslo_first;
2415 if (lop->nfslo_end > new_lop->nfslo_end)
2416 new_lop->nfslo_end = lop->nfslo_end;
2417 tlop = lop;
2418 lop = LIST_NEXT(lop, nfslo_list);
2419 nfscl_freelock(tlop, local);
2420 continue;
2421 }
2422
2423 /*
2424 * All these cases are for contiguous locks that are not the
2425 * same type, so they can't be merged.
2426 */
2427 if (new_lop->nfslo_first <= lop->nfslo_first) {
2428 /*
2429 * This case is where the new lock overlaps with the
2430 * first part of the old lock. Move the start of the
2431 * old lock to just past the end of the new lock. The
2432 * new lock will be inserted in front of the old, since
2433 * ilop hasn't been updated. (We are done now.)
2434 */
2435 if (lop->nfslo_first != new_lop->nfslo_end) {
2436 lop->nfslo_first = new_lop->nfslo_end;
2437 modified = 1;
2438 }
2439 break;
2440 }
2441 if (new_lop->nfslo_end >= lop->nfslo_end) {
2442 /*
2443 * This case is where the new lock overlaps with the
2444 * end of the old lock's range. Move the old lock's
2445 * end to just before the new lock's first and insert
2446 * the new lock after the old lock.
2447 * Might not be done yet, since the new lock could
2448 * overlap further locks with higher ranges.
2449 */
2450 if (lop->nfslo_end != new_lop->nfslo_first) {
2451 lop->nfslo_end = new_lop->nfslo_first;
2452 modified = 1;
2453 }
2454 ilop = lop;
2455 lop = LIST_NEXT(lop, nfslo_list);
2456 continue;
2457 }
2458 /*
2459 * The final case is where the new lock's range is in the
2460 * middle of the current lock's and splits the current lock
2461 * up. Use *other_lopp to handle the second part of the
2462 * split old lock range. (We are done now.)
			 * For an unlock, new_lop itself is reused as other_lop,
			 * since new_lop will not be inserted in that case.
			 * The unlock case was noted in "unlock" above, so
			 * new_lop->nfslo_type is no longer needed.
2467 */
2468 tmp = new_lop->nfslo_first;
2469 if (unlock) {
2470 other_lop = new_lop;
2471 *new_lopp = NULL;
2472 } else {
2473 other_lop = *other_lopp;
2474 *other_lopp = NULL;
2475 }
2476 other_lop->nfslo_first = new_lop->nfslo_end;
2477 other_lop->nfslo_end = lop->nfslo_end;
2478 other_lop->nfslo_type = lop->nfslo_type;
2479 lop->nfslo_end = tmp;
2480 nfscl_insertlock(lp, other_lop, lop, local);
2481 ilop = lop;
2482 modified = 1;
2483 break;
2484 }
2485 ilop = lop;
2486 lop = LIST_NEXT(lop, nfslo_list);
2487 if (lop == NULL)
2488 break;
2489 }
2490
2491 /*
2492 * Insert the new lock in the list at the appropriate place.
2493 */
2494 if (!unlock) {
2495 nfscl_insertlock(lp, new_lop, ilop, local);
2496 *new_lopp = NULL;
2497 modified = 1;
2498 }
2499 return (modified);
2500 }
2501
2502 /*
2503 * This function must be run as a kernel thread.
2504 * It does Renew Ops and recovery, when required.
2505 */
2506 APPLESTATIC void
2507 nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p)
2508 {
2509 struct nfsclowner *owp, *nowp;
2510 struct nfsclopen *op;
2511 struct nfscllockowner *lp, *nlp;
2512 struct nfscldeleghead dh;
2513 struct nfscldeleg *dp, *ndp;
2514 struct ucred *cred;
2515 u_int32_t clidrev;
2516 int error, cbpathdown, islept, igotlock, ret, clearok;
2517 uint32_t recover_done_time = 0;
2518 time_t mytime;
2519 static time_t prevsec = 0;
2520 struct nfscllockownerfh *lfhp, *nlfhp;
2521 struct nfscllockownerfhhead lfh;
2522 struct nfscllayout *lyp, *nlyp;
2523 struct nfscldevinfo *dip, *ndip;
2524 struct nfscllayouthead rlh;
2525 struct nfsclrecalllayout *recallp;
2526 struct nfsclds *dsp;
2527
2528 cred = newnfs_getcred();
2529 NFSLOCKCLSTATE();
2530 clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD;
2531 NFSUNLOCKCLSTATE();
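	/*
	 * Loop forever (until unmount), doing a pass of recovery, lease
	 * renewal, recalls and cleanup, then sleeping for up to a second.
	 */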
	for (;;) {
2533 newnfs_setroot(cred);
2534 cbpathdown = 0;
2535 if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) {
2536 /*
			 * Only allow one recovery within 1/2 of the lease
			 * duration (nfsc_renew).
2539 */
2540 if (recover_done_time < NFSD_MONOSEC) {
2541 recover_done_time = NFSD_MONOSEC +
2542 clp->nfsc_renew;
2543 NFSCL_DEBUG(1, "Doing recovery..\n");
2544 nfscl_recover(clp, cred, p);
2545 } else {
2546 NFSCL_DEBUG(1, "Clear Recovery dt=%u ms=%jd\n",
2547 recover_done_time, (intmax_t)NFSD_MONOSEC);
2548 NFSLOCKCLSTATE();
2549 clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER;
2550 NFSUNLOCKCLSTATE();
2551 }
2552 }
2553 if (clp->nfsc_expire <= NFSD_MONOSEC &&
2554 (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) {
2555 clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew;
2556 clidrev = clp->nfsc_clientidrev;
2557 error = nfsrpc_renew(clp, NULL, cred, p);
2558 if (error == NFSERR_CBPATHDOWN)
2559 cbpathdown = 1;
2560 else if (error == NFSERR_STALECLIENTID ||
2561 error == NFSERR_BADSESSION) {
2562 NFSLOCKCLSTATE();
2563 clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
2564 NFSUNLOCKCLSTATE();
2565 } else if (error == NFSERR_EXPIRED)
2566 (void) nfscl_hasexpired(clp, clidrev, p);
2567 }
2568
2569 checkdsrenew:
2570 if (NFSHASNFSV4N(clp->nfsc_nmp)) {
2571 /* Do renews for any DS sessions. */
2572 NFSLOCKMNT(clp->nfsc_nmp);
2573 /* Skip first entry, since the MDS is handled above. */
2574 dsp = TAILQ_FIRST(&clp->nfsc_nmp->nm_sess);
2575 if (dsp != NULL)
2576 dsp = TAILQ_NEXT(dsp, nfsclds_list);
2577 while (dsp != NULL) {
2578 if (dsp->nfsclds_expire <= NFSD_MONOSEC &&
2579 dsp->nfsclds_sess.nfsess_defunct == 0) {
2580 dsp->nfsclds_expire = NFSD_MONOSEC +
2581 clp->nfsc_renew;
2582 NFSUNLOCKMNT(clp->nfsc_nmp);
2583 (void)nfsrpc_renew(clp, dsp, cred, p);
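					/*
					 * The mount lock was dropped for the
					 * renew RPC, so the session list may
					 * have changed; rescan from the start.
					 */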
2584 goto checkdsrenew;
2585 }
2586 dsp = TAILQ_NEXT(dsp, nfsclds_list);
2587 }
2588 NFSUNLOCKMNT(clp->nfsc_nmp);
2589 }
2590
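		/* "dh" collects delegations to be returned later in this pass. */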
2591 TAILQ_INIT(&dh);
2592 NFSLOCKCLSTATE();
2593 if (cbpathdown)
2594 /* It's a Total Recall! */
2595 nfscl_totalrecall(clp);
2596
2597 /*
2598 * Now, handle defunct owners.
2599 */
2600 LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
2601 if (LIST_EMPTY(&owp->nfsow_open)) {
2602 if (owp->nfsow_defunct != 0)
2603 nfscl_freeopenowner(owp, 0);
2604 }
2605 }
2606
2607 /*
2608 * Do the recall on any delegations. To avoid trouble, always
2609 * come back up here after having slept.
2610 */
2611 igotlock = 0;
2612 tryagain:
2613 dp = TAILQ_FIRST(&clp->nfsc_deleg);
2614 while (dp != NULL) {
2615 ndp = TAILQ_NEXT(dp, nfsdl_list);
2616 if ((dp->nfsdl_flags & NFSCLDL_RECALL)) {
2617 /*
2618 * Wait for outstanding I/O ops to be done.
2619 */
2620 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
2621 if (igotlock) {
2622 nfsv4_unlock(&clp->nfsc_lock, 0);
2623 igotlock = 0;
2624 }
2625 dp->nfsdl_rwlock.nfslock_lock |=
2626 NFSV4LOCK_WANTED;
2627 (void) nfsmsleep(&dp->nfsdl_rwlock,
2628 NFSCLSTATEMUTEXPTR, PZERO, "nfscld",
2629 NULL);
2630 goto tryagain;
2631 }
2632 while (!igotlock) {
2633 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
2634 &islept, NFSCLSTATEMUTEXPTR, NULL);
2635 if (islept)
2636 goto tryagain;
2637 }
2638 NFSUNLOCKCLSTATE();
2639 newnfs_copycred(&dp->nfsdl_cred, cred);
2640 ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp,
2641 NULL, cred, p, 1);
2642 if (!ret) {
2643 nfscl_cleandeleg(dp);
2644 TAILQ_REMOVE(&clp->nfsc_deleg, dp,
2645 nfsdl_list);
2646 LIST_REMOVE(dp, nfsdl_hash);
2647 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
2648 nfscl_delegcnt--;
2649 newnfsstats.cldelegates--;
2650 }
2651 NFSLOCKCLSTATE();
2652 }
2653 dp = ndp;
2654 }
2655
2656 /*
2657 * Clear out old delegations, if we are above the high water
2658 * mark. Only clear out ones with no state related to them.
2659 * The tailq list is in LRU order.
2660 */
2661 dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead);
2662 while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) {
2663 ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list);
2664 if (dp->nfsdl_rwlock.nfslock_usecnt == 0 &&
2665 dp->nfsdl_rwlock.nfslock_lock == 0 &&
2666 dp->nfsdl_timestamp < NFSD_MONOSEC &&
2667 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED |
2668 NFSCLDL_NEEDRECLAIM | NFSCLDL_DELEGRET)) == 0) {
2669 clearok = 1;
2670 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2671 op = LIST_FIRST(&owp->nfsow_open);
2672 if (op != NULL) {
2673 clearok = 0;
2674 break;
2675 }
2676 }
2677 if (clearok) {
2678 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
2679 if (!LIST_EMPTY(&lp->nfsl_lock)) {
2680 clearok = 0;
2681 break;
2682 }
2683 }
2684 }
2685 if (clearok) {
2686 TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list);
2687 LIST_REMOVE(dp, nfsdl_hash);
2688 TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
2689 nfscl_delegcnt--;
2690 newnfsstats.cldelegates--;
2691 }
2692 }
2693 dp = ndp;
2694 }
2695 if (igotlock)
2696 nfsv4_unlock(&clp->nfsc_lock, 0);
2697
2698 /*
2699 * Do the recall on any layouts. To avoid trouble, always
2700 * come back up here after having slept.
2701 */
2702 TAILQ_INIT(&rlh);
2703 tryagain2:
2704 TAILQ_FOREACH_SAFE(lyp, &clp->nfsc_layout, nfsly_list, nlyp) {
2705 if ((lyp->nfsly_flags & NFSLY_RECALL) != 0) {
2706 /*
2707 * Wait for outstanding I/O ops to be done.
2708 */
2709 if (lyp->nfsly_lock.nfslock_usecnt > 0 ||
2710 (lyp->nfsly_lock.nfslock_lock &
2711 NFSV4LOCK_LOCK) != 0) {
2712 lyp->nfsly_lock.nfslock_lock |=
2713 NFSV4LOCK_WANTED;
2714 (void)nfsmsleep(&lyp->nfsly_lock,
2715 NFSCLSTATEMUTEXPTR, PZERO, "nfslyp",
2716 NULL);
2717 goto tryagain2;
2718 }
2719 /* Move the layout to the recall list. */
2720 TAILQ_REMOVE(&clp->nfsc_layout, lyp,
2721 nfsly_list);
2722 LIST_REMOVE(lyp, nfsly_hash);
2723 TAILQ_INSERT_HEAD(&rlh, lyp, nfsly_list);
2724
2725 /* Handle any layout commits. */
2726 if (!NFSHASNOLAYOUTCOMMIT(clp->nfsc_nmp) &&
2727 (lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
2728 lyp->nfsly_flags &= ~NFSLY_WRITTEN;
2729 NFSUNLOCKCLSTATE();
2730 NFSCL_DEBUG(3, "do layoutcommit\n");
2731 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp,
2732 cred, p);
2733 NFSLOCKCLSTATE();
2734 goto tryagain2;
2735 }
2736 }
2737 }
2738
2739 /* Now, look for stale layouts. */
2740 lyp = TAILQ_LAST(&clp->nfsc_layout, nfscllayouthead);
2741 while (lyp != NULL) {
2742 nlyp = TAILQ_PREV(lyp, nfscllayouthead, nfsly_list);
2743 if (lyp->nfsly_timestamp < NFSD_MONOSEC &&
2744 (lyp->nfsly_flags & NFSLY_RECALL) == 0 &&
2745 lyp->nfsly_lock.nfslock_usecnt == 0 &&
2746 lyp->nfsly_lock.nfslock_lock == 0) {
2747 NFSCL_DEBUG(4, "ret stale lay=%d\n",
2748 nfscl_layoutcnt);
2749 recallp = malloc(sizeof(*recallp),
2750 M_NFSLAYRECALL, M_NOWAIT);
2751 if (recallp == NULL)
2752 break;
2753 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE,
2754 lyp, NFSLAYOUTIOMODE_ANY, 0, UINT64_MAX,
2755 lyp->nfsly_stateid.seqid, recallp);
2756 }
2757 lyp = nlyp;
2758 }
2759
2760 /*
2761 * Free up any unreferenced device info structures.
2762 */
2763 LIST_FOREACH_SAFE(dip, &clp->nfsc_devinfo, nfsdi_list, ndip) {
2764 if (dip->nfsdi_layoutrefs == 0 &&
2765 dip->nfsdi_refcnt == 0) {
2766 NFSCL_DEBUG(4, "freeing devinfo\n");
2767 LIST_REMOVE(dip, nfsdi_list);
2768 nfscl_freedevinfo(dip);
2769 }
2770 }
2771 NFSUNLOCKCLSTATE();
2772
2773 /* Do layout return(s), as required. */
2774 TAILQ_FOREACH_SAFE(lyp, &rlh, nfsly_list, nlyp) {
2775 TAILQ_REMOVE(&rlh, lyp, nfsly_list);
2776 NFSCL_DEBUG(4, "ret layout\n");
2777 nfscl_layoutreturn(clp->nfsc_nmp, lyp, cred, p);
2778 nfscl_freelayout(lyp);
2779 }
2780
2781 /*
2782 * Delegreturn any delegations cleaned out or recalled.
2783 */
2784 TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) {
2785 newnfs_copycred(&dp->nfsdl_cred, cred);
2786 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
2787 TAILQ_REMOVE(&dh, dp, nfsdl_list);
2788 FREE((caddr_t)dp, M_NFSCLDELEG);
2789 }
2790
2791 SLIST_INIT(&lfh);
2792 /*
2793 * Call nfscl_cleanupkext() once per second to check for
2794 * open/lock owners where the process has exited.
2795 */
2796 mytime = NFSD_MONOSEC;
2797 if (prevsec != mytime) {
2798 prevsec = mytime;
2799 nfscl_cleanupkext(clp, &lfh);
2800 }
2801
2802 /*
2803 * Do a ReleaseLockOwner for all lock owners where the
2804 * associated process no longer exists, as found by
2805 * nfscl_cleanupkext().
2806 */
2807 newnfs_setroot(cred);
2808 SLIST_FOREACH_SAFE(lfhp, &lfh, nfslfh_list, nlfhp) {
2809 LIST_FOREACH_SAFE(lp, &lfhp->nfslfh_lock, nfsl_list,
2810 nlp) {
2811 (void)nfsrpc_rellockown(clp->nfsc_nmp, lp,
2812 lfhp->nfslfh_fh, lfhp->nfslfh_len, cred,
2813 p);
2814 nfscl_freelockowner(lp, 0);
2815 }
2816 free(lfhp, M_TEMP);
2817 }
2818 SLIST_INIT(&lfh);
2819
2820 NFSLOCKCLSTATE();
2821 if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0)
2822 (void)mtx_sleep(clp, NFSCLSTATEMUTEXPTR, PWAIT, "nfscl",
2823 hz);
2824 if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) {
2825 clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD;
2826 NFSUNLOCKCLSTATE();
2827 NFSFREECRED(cred);
2828 wakeup((caddr_t)clp);
2829 return;
2830 }
2831 NFSUNLOCKCLSTATE();
2832 }
2833 }
2834
2835 /*
2836 * Initiate state recovery. Called when NFSERR_STALECLIENTID,
2837 * NFSERR_STALESTATEID or NFSERR_BADSESSION is received.
2838 */
2839 APPLESTATIC void
2840 nfscl_initiate_recovery(struct nfsclclient *clp)
2841 {
2842
2843 if (clp == NULL)
2844 return;
2845 NFSLOCKCLSTATE();
2846 clp->nfsc_flags |= NFSCLFLAGS_RECOVER;
2847 NFSUNLOCKCLSTATE();
2848 wakeup((caddr_t)clp);
2849 }
2850
2851 /*
2852 * Dump out the state stuff for debugging.
2853 */
2854 APPLESTATIC void
2855 nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens,
2856 int lockowner, int locks)
2857 {
2858 struct nfsclclient *clp;
2859 struct nfsclowner *owp;
2860 struct nfsclopen *op;
2861 struct nfscllockowner *lp;
2862 struct nfscllock *lop;
2863 struct nfscldeleg *dp;
2864
2865 clp = nmp->nm_clp;
2866 if (clp == NULL) {
2867 printf("nfscl dumpstate NULL clp\n");
2868 return;
2869 }
2870 NFSLOCKCLSTATE();
2871 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
2872 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
2873 if (openowner && !LIST_EMPTY(&owp->nfsow_open))
2874 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
2875 owp->nfsow_owner[0], owp->nfsow_owner[1],
2876 owp->nfsow_owner[2], owp->nfsow_owner[3],
2877 owp->nfsow_seqid);
2878 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2879 if (opens)
2880 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
2881 op->nfso_stateid.other[0], op->nfso_stateid.other[1],
2882 op->nfso_stateid.other[2], op->nfso_opencnt,
2883 op->nfso_fh[12]);
2884 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
2885 if (lockowner)
2886 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
2887 lp->nfsl_owner[0], lp->nfsl_owner[1],
2888 lp->nfsl_owner[2], lp->nfsl_owner[3],
2889 lp->nfsl_seqid,
2890 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
2891 lp->nfsl_stateid.other[2]);
2892 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
2893 if (locks)
2894 #ifdef __FreeBSD__
2895 printf("lck typ=%d fst=%ju end=%ju\n",
2896 lop->nfslo_type, (intmax_t)lop->nfslo_first,
2897 (intmax_t)lop->nfslo_end);
2898 #else
2899 printf("lck typ=%d fst=%qd end=%qd\n",
2900 lop->nfslo_type, lop->nfslo_first,
2901 lop->nfslo_end);
2902 #endif
2903 }
2904 }
2905 }
2906 }
2907 }
2908 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2909 if (openowner && !LIST_EMPTY(&owp->nfsow_open))
2910 printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n",
2911 owp->nfsow_owner[0], owp->nfsow_owner[1],
2912 owp->nfsow_owner[2], owp->nfsow_owner[3],
2913 owp->nfsow_seqid);
2914 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2915 if (opens)
2916 printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n",
2917 op->nfso_stateid.other[0], op->nfso_stateid.other[1],
2918 op->nfso_stateid.other[2], op->nfso_opencnt,
2919 op->nfso_fh[12]);
2920 LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
2921 if (lockowner)
2922 printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n",
2923 lp->nfsl_owner[0], lp->nfsl_owner[1],
2924 lp->nfsl_owner[2], lp->nfsl_owner[3],
2925 lp->nfsl_seqid,
2926 lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1],
2927 lp->nfsl_stateid.other[2]);
2928 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
2929 if (locks)
2930 #ifdef __FreeBSD__
2931 printf("lck typ=%d fst=%ju end=%ju\n",
2932 lop->nfslo_type, (intmax_t)lop->nfslo_first,
2933 (intmax_t)lop->nfslo_end);
2934 #else
2935 printf("lck typ=%d fst=%qd end=%qd\n",
2936 lop->nfslo_type, lop->nfslo_first,
2937 lop->nfslo_end);
2938 #endif
2939 }
2940 }
2941 }
2942 }
2943 NFSUNLOCKCLSTATE();
2944 }
2945
2946 /*
2947 * Check for duplicate open owners and opens.
2948 * (Only used as a diagnostic aid.)
2949 */
2950 APPLESTATIC void
2951 nfscl_dupopen(vnode_t vp, int dupopens)
2952 {
2953 struct nfsclclient *clp;
2954 struct nfsclowner *owp, *owp2;
2955 struct nfsclopen *op, *op2;
2956 struct nfsfh *nfhp;
2957
2958 clp = VFSTONFS(vnode_mount(vp))->nm_clp;
2959 if (clp == NULL) {
2960 printf("nfscl dupopen NULL clp\n");
2961 return;
2962 }
2963 nfhp = VTONFS(vp)->n_fhp;
2964 NFSLOCKCLSTATE();
2965
2966 /*
2967 * First, search for duplicate owners.
2968 * These should never happen!
2969 */
2970 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
2971 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2972 if (owp != owp2 &&
2973 !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner,
2974 NFSV4CL_LOCKNAMELEN)) {
2975 NFSUNLOCKCLSTATE();
2976 printf("DUP OWNER\n");
2977 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 0);
2978 return;
2979 }
2980 }
2981 }
2982
2983 /*
2984 * Now, search for duplicate stateids.
2985 * These shouldn't happen, either.
2986 */
2987 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
2988 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
2989 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
2990 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
2991 if (op != op2 &&
2992 (op->nfso_stateid.other[0] != 0 ||
2993 op->nfso_stateid.other[1] != 0 ||
2994 op->nfso_stateid.other[2] != 0) &&
2995 op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] &&
2996 op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] &&
2997 op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) {
2998 NFSUNLOCKCLSTATE();
2999 printf("DUP STATEID\n");
3000 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0,
3001 0);
3002 return;
3003 }
3004 }
3005 }
3006 }
3007 }
3008
3009 /*
3010 * Now search for duplicate opens.
3011 * Duplicate opens for the same owner
3012 * should never occur. Other duplicates are
3013 * possible and are checked for if "dupopens"
3014 * is true.
3015 */
3016 LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) {
3017 LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) {
3018 if (nfhp->nfh_len == op2->nfso_fhlen &&
3019 !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) {
3020 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3021 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3022 if (op != op2 && nfhp->nfh_len == op->nfso_fhlen &&
3023 !NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) &&
3024 (!NFSBCMP(op->nfso_own->nfsow_owner,
3025 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) ||
3026 dupopens)) {
3027 if (!NFSBCMP(op->nfso_own->nfsow_owner,
3028 op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
3029 NFSUNLOCKCLSTATE();
3030 printf("BADDUP OPEN\n");
3031 } else {
3032 NFSUNLOCKCLSTATE();
3033 printf("DUP OPEN\n");
3034 }
3035 nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1,
3036 0, 0);
3037 return;
3038 }
3039 }
3040 }
3041 }
3042 }
3043 }
3044 NFSUNLOCKCLSTATE();
3045 }
3046
3047 /*
3048 * During close, find an open that needs to be dereferenced and
3049 * dereference it. If there are no more opens for this file,
3050 * log a message to that effect.
3051 * Opens aren't actually Close'd until VOP_INACTIVE() is performed
3052 * on the file's vnode.
3053 * This is the safe way, since it is difficult to identify
3054 * which open the close is for and I/O can be performed after the
3055 * close(2) system call when a file is mmap'd.
3056 * If it returns 0 for success, there will be a referenced
3057 * clp returned via clpp.
3058 */
3059 APPLESTATIC int
3060 nfscl_getclose(vnode_t vp, struct nfsclclient **clpp)
3061 {
3062 struct nfsclclient *clp;
3063 struct nfsclowner *owp;
3064 struct nfsclopen *op;
3065 struct nfscldeleg *dp;
3066 struct nfsfh *nfhp;
3067 int error, notdecr;
3068
3069 error = nfscl_getcl(vnode_mount(vp), NULL, NULL, 1, &clp);
3070 if (error)
3071 return (error);
3072 *clpp = clp;
3073
3074 nfhp = VTONFS(vp)->n_fhp;
3075 notdecr = 1;
3076 NFSLOCKCLSTATE();
3077 /*
3078 * First, look for one under a delegation that was locally issued
3079 * and just decrement the opencnt for it. Since all my Opens against
3080 * the server are DENY_NONE, I don't see a problem with hanging
3081 * onto them. (It is much easier to use one of the extant Opens
3082 * that I already have on the server when a Delegation is recalled
	 * than to do fresh Opens.) Someday, I might need to rethink this.
3084 */
3085 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
3086 if (dp != NULL) {
3087 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
3088 op = LIST_FIRST(&owp->nfsow_open);
3089 if (op != NULL) {
3090 /*
3091 * Since a delegation is for a file, there
3092 * should never be more than one open for
3093 * each openowner.
3094 */
3095 if (LIST_NEXT(op, nfso_list) != NULL)
3096 panic("nfscdeleg opens");
3097 if (notdecr && op->nfso_opencnt > 0) {
3098 notdecr = 0;
3099 op->nfso_opencnt--;
3100 break;
3101 }
3102 }
3103 }
3104 }
3105
3106 /* Now process the opens against the server. */
3107 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3108 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3109 if (op->nfso_fhlen == nfhp->nfh_len &&
3110 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
3111 nfhp->nfh_len)) {
3112 /* Found an open, decrement cnt if possible */
3113 if (notdecr && op->nfso_opencnt > 0) {
3114 notdecr = 0;
3115 op->nfso_opencnt--;
3116 }
3117 /*
3118 * There are more opens, so just return.
3119 */
3120 if (op->nfso_opencnt > 0) {
3121 NFSUNLOCKCLSTATE();
3122 return (0);
3123 }
3124 }
3125 }
3126 }
3127 NFSUNLOCKCLSTATE();
3128 if (notdecr)
3129 printf("nfscl: never fnd open\n");
3130 return (0);
3131 }
3132
3133 APPLESTATIC int
3134 nfscl_doclose(vnode_t vp, struct nfsclclient **clpp, NFSPROC_T *p)
3135 {
3136 struct nfsclclient *clp;
3137 struct nfsclowner *owp, *nowp;
3138 struct nfsclopen *op;
3139 struct nfscldeleg *dp;
3140 struct nfsfh *nfhp;
3141 struct nfsclrecalllayout *recallp;
3142 int error;
3143
3144 error = nfscl_getcl(vnode_mount(vp), NULL, NULL, 1, &clp);
3145 if (error)
3146 return (error);
3147 *clpp = clp;
3148
3149 nfhp = VTONFS(vp)->n_fhp;
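	/*
	 * Pre-allocate the layout recall structure here, since an M_WAITOK
	 * allocation cannot be done while holding the state mutex.
	 */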
3150 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL, M_WAITOK);
3151 NFSLOCKCLSTATE();
3152 /*
3153 * First get rid of the local Open structures, which should be no
3154 * longer in use.
3155 */
3156 dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len);
3157 if (dp != NULL) {
3158 LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) {
3159 op = LIST_FIRST(&owp->nfsow_open);
3160 if (op != NULL) {
3161 KASSERT((op->nfso_opencnt == 0),
3162 ("nfscl: bad open cnt on deleg"));
3163 nfscl_freeopen(op, 1);
3164 }
3165 nfscl_freeopenowner(owp, 1);
3166 }
3167 }
3168
3169 /* Return any layouts marked return on close. */
3170 nfscl_retoncloselayout(vp, clp, nfhp->nfh_fh, nfhp->nfh_len, &recallp);
3171
3172 /* Now process the opens against the server. */
3173 lookformore:
3174 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3175 op = LIST_FIRST(&owp->nfsow_open);
3176 while (op != NULL) {
3177 if (op->nfso_fhlen == nfhp->nfh_len &&
3178 !NFSBCMP(op->nfso_fh, nfhp->nfh_fh,
3179 nfhp->nfh_len)) {
3180 /* Found an open, close it. */
3181 KASSERT((op->nfso_opencnt == 0),
3182 ("nfscl: bad open cnt on server"));
3183 NFSUNLOCKCLSTATE();
3184 nfsrpc_doclose(VFSTONFS(vnode_mount(vp)), op,
3185 p);
3186 NFSLOCKCLSTATE();
3187 goto lookformore;
3188 }
3189 op = LIST_NEXT(op, nfso_list);
3190 }
3191 }
3192 NFSUNLOCKCLSTATE();
3193 /*
3194 * recallp has been set NULL by nfscl_retoncloselayout() if it was
3195 * used by the function, but calling free() with a NULL pointer is ok.
3196 */
3197 free(recallp, M_NFSLAYRECALL);
3198 return (0);
3199 }
3200
3201 /*
3202 * Return all delegations on this client.
3203 * (Must be called with client sleep lock.)
3204 */
3205 static void
3206 nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p)
3207 {
3208 struct nfscldeleg *dp, *ndp;
3209 struct ucred *cred;
3210
3211 cred = newnfs_getcred();
3212 TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) {
3213 nfscl_cleandeleg(dp);
3214 (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p);
3215 nfscl_freedeleg(&clp->nfsc_deleg, dp);
3216 }
3217 NFSFREECRED(cred);
3218 }
3219
3220 /*
3221 * Do a callback RPC.
3222 */
3223 APPLESTATIC void
3224 nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p)
3225 {
3226 int clist, gotseq_ok, i, j, k, op, rcalls;
3227 u_int32_t *tl;
3228 struct nfsclclient *clp;
3229 struct nfscldeleg *dp = NULL;
3230 int numops, taglen = -1, error = 0, trunc;
3231 u_int32_t minorvers = 0, retops = 0, *retopsp = NULL, *repp, cbident;
3232 u_char tag[NFSV4_SMALLSTR + 1], *tagstr;
3233 vnode_t vp = NULL;
3234 struct nfsnode *np;
3235 struct vattr va;
3236 struct nfsfh *nfhp;
3237 mount_t mp;
3238 nfsattrbit_t attrbits, rattrbits;
3239 nfsv4stateid_t stateid;
3240 uint32_t seqid, slotid = 0, highslot, cachethis;
3241 uint8_t sessionid[NFSX_V4SESSIONID];
3242 struct mbuf *rep;
3243 struct nfscllayout *lyp;
3244 uint64_t filesid[2], len, off;
3245 int changed, gotone, laytype, recalltype;
3246 uint32_t iomode;
3247 struct nfsclrecalllayout *recallp = NULL;
3248 struct nfsclsession *tsep;
3249
3250 gotseq_ok = 0;
3251 nfsrvd_rephead(nd);
3252 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
3253 taglen = fxdr_unsigned(int, *tl);
3254 if (taglen < 0) {
3255 error = EBADRPC;
3256 goto nfsmout;
3257 }
3258 if (taglen <= NFSV4_SMALLSTR)
3259 tagstr = tag;
3260 else
3261 tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK);
3262 error = nfsrv_mtostr(nd, tagstr, taglen);
3263 if (error) {
3264 if (taglen > NFSV4_SMALLSTR)
3265 free(tagstr, M_TEMP);
3266 taglen = -1;
3267 goto nfsmout;
3268 }
3269 (void) nfsm_strtom(nd, tag, taglen);
3270 if (taglen > NFSV4_SMALLSTR) {
3271 free(tagstr, M_TEMP);
3272 }
3273 NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED);
3274 NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
3275 minorvers = fxdr_unsigned(u_int32_t, *tl++);
3276 if (minorvers != NFSV4_MINORVERSION && minorvers != NFSV41_MINORVERSION)
3277 nd->nd_repstat = NFSERR_MINORVERMISMATCH;
3278 cbident = fxdr_unsigned(u_int32_t, *tl++);
3279 if (nd->nd_repstat)
3280 numops = 0;
3281 else
3282 numops = fxdr_unsigned(int, *tl);
3283 /*
3284 * Loop around doing the sub ops.
3285 */
3286 for (i = 0; i < numops; i++) {
3287 NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
3288 NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED);
3289 *repp++ = *tl;
3290 op = fxdr_unsigned(int, *tl);
3291 if (op < NFSV4OP_CBGETATTR ||
3292 (op > NFSV4OP_CBRECALL && minorvers == NFSV4_MINORVERSION) ||
3293 (op > NFSV4OP_CBNOTIFYDEVID &&
3294 minorvers == NFSV41_MINORVERSION)) {
3295 nd->nd_repstat = NFSERR_OPILLEGAL;
3296 *repp = nfscl_errmap(nd, minorvers);
3297 retops++;
3298 break;
3299 }
3300 nd->nd_procnum = op;
3301 if (op < NFSV4OP_CBNOPS)
3302 newnfsstats.cbrpccnt[nd->nd_procnum]++;
3303 switch (op) {
3304 case NFSV4OP_CBGETATTR:
3305 NFSCL_DEBUG(4, "cbgetattr\n");
3306 mp = NULL;
3307 vp = NULL;
3308 error = nfsm_getfh(nd, &nfhp);
3309 if (!error)
3310 error = nfsrv_getattrbits(nd, &attrbits,
3311 NULL, NULL);
3312 if (error == 0 && i == 0 &&
3313 minorvers != NFSV4_MINORVERSION)
3314 error = NFSERR_OPNOTINSESS;
3315 if (!error) {
3316 mp = nfscl_getmnt(minorvers, sessionid, cbident,
3317 &clp);
3318 if (mp == NULL)
3319 error = NFSERR_SERVERFAULT;
3320 }
3321 if (!error) {
3322 error = nfscl_ngetreopen(mp, nfhp->nfh_fh,
3323 nfhp->nfh_len, p, &np);
3324 if (!error)
3325 vp = NFSTOV(np);
3326 }
3327 if (!error) {
3328 NFSZERO_ATTRBIT(&rattrbits);
3329 NFSLOCKCLSTATE();
3330 dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3331 nfhp->nfh_len);
3332 if (dp != NULL) {
3333 if (NFSISSET_ATTRBIT(&attrbits,
3334 NFSATTRBIT_SIZE)) {
3335 if (vp != NULL)
3336 va.va_size = np->n_size;
3337 else
3338 va.va_size =
3339 dp->nfsdl_size;
3340 NFSSETBIT_ATTRBIT(&rattrbits,
3341 NFSATTRBIT_SIZE);
3342 }
3343 if (NFSISSET_ATTRBIT(&attrbits,
3344 NFSATTRBIT_CHANGE)) {
3345 va.va_filerev =
3346 dp->nfsdl_change;
3347 if (vp == NULL ||
3348 (np->n_flag & NDELEGMOD))
3349 va.va_filerev++;
3350 NFSSETBIT_ATTRBIT(&rattrbits,
3351 NFSATTRBIT_CHANGE);
3352 }
3353 } else
3354 error = NFSERR_SERVERFAULT;
3355 NFSUNLOCKCLSTATE();
3356 }
3357 if (vp != NULL)
3358 vrele(vp);
3359 if (mp != NULL)
3360 vfs_unbusy(mp);
3361 if (nfhp != NULL)
3362 FREE((caddr_t)nfhp, M_NFSFH);
3363 if (!error)
3364 (void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va,
3365 NULL, 0, &rattrbits, NULL, p, 0, 0, 0, 0,
3366 (uint64_t)0);
3367 break;
3368 case NFSV4OP_CBRECALL:
3369 NFSCL_DEBUG(4, "cbrecall\n");
3370 NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID +
3371 NFSX_UNSIGNED);
3372 stateid.seqid = *tl++;
3373 NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other,
3374 NFSX_STATEIDOTHER);
3375 tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED);
3376 trunc = fxdr_unsigned(int, *tl);
3377 error = nfsm_getfh(nd, &nfhp);
3378 if (error == 0 && i == 0 &&
3379 minorvers != NFSV4_MINORVERSION)
3380 error = NFSERR_OPNOTINSESS;
3381 if (!error) {
3382 NFSLOCKCLSTATE();
3383 if (minorvers == NFSV4_MINORVERSION)
3384 clp = nfscl_getclnt(cbident);
3385 else
3386 clp = nfscl_getclntsess(sessionid);
3387 if (clp != NULL) {
3388 dp = nfscl_finddeleg(clp, nfhp->nfh_fh,
3389 nfhp->nfh_len);
3390 if (dp != NULL && (dp->nfsdl_flags &
3391 NFSCLDL_DELEGRET) == 0) {
3392 dp->nfsdl_flags |=
3393 NFSCLDL_RECALL;
3394 wakeup((caddr_t)clp);
3395 }
3396 } else {
3397 error = NFSERR_SERVERFAULT;
3398 }
3399 NFSUNLOCKCLSTATE();
3400 }
3401 if (nfhp != NULL)
3402 FREE((caddr_t)nfhp, M_NFSFH);
3403 break;
3404 case NFSV4OP_CBLAYOUTRECALL:
3405 NFSCL_DEBUG(4, "cblayrec\n");
3406 nfhp = NULL;
3407 NFSM_DISSECT(tl, uint32_t *, 4 * NFSX_UNSIGNED);
3408 laytype = fxdr_unsigned(int, *tl++);
3409 iomode = fxdr_unsigned(uint32_t, *tl++);
3410 if (newnfs_true == *tl++)
3411 changed = 1;
3412 else
3413 changed = 0;
3414 recalltype = fxdr_unsigned(int, *tl);
3415 recallp = malloc(sizeof(*recallp), M_NFSLAYRECALL,
3416 M_WAITOK);
3417 if (laytype != NFSLAYOUT_NFSV4_1_FILES)
3418 error = NFSERR_NOMATCHLAYOUT;
3419 else if (recalltype == NFSLAYOUTRETURN_FILE) {
3420 error = nfsm_getfh(nd, &nfhp);
3421 NFSCL_DEBUG(4, "retfile getfh=%d\n", error);
3422 if (error != 0)
3423 goto nfsmout;
3424 NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_HYPER +
3425 NFSX_STATEID);
3426 off = fxdr_hyper(tl); tl += 2;
3427 len = fxdr_hyper(tl); tl += 2;
3428 stateid.seqid = fxdr_unsigned(uint32_t, *tl++);
3429 NFSBCOPY(tl, stateid.other, NFSX_STATEIDOTHER);
3430 if (minorvers == NFSV4_MINORVERSION)
3431 error = NFSERR_NOTSUPP;
3432 else if (i == 0)
3433 error = NFSERR_OPNOTINSESS;
3434 if (error == 0) {
3435 NFSLOCKCLSTATE();
3436 clp = nfscl_getclntsess(sessionid);
3437 NFSCL_DEBUG(4, "cbly clp=%p\n", clp);
3438 if (clp != NULL) {
3439 lyp = nfscl_findlayout(clp,
3440 nfhp->nfh_fh,
3441 nfhp->nfh_len);
3442 NFSCL_DEBUG(4, "cblyp=%p\n",
3443 lyp);
3444 if (lyp != NULL &&
3445 (lyp->nfsly_flags &
3446 NFSLY_FILES) != 0 &&
3447 !NFSBCMP(stateid.other,
3448 lyp->nfsly_stateid.other,
3449 NFSX_STATEIDOTHER)) {
3450 error =
3451 nfscl_layoutrecall(
3452 recalltype,
3453 lyp, iomode, off,
3454 len, stateid.seqid,
3455 recallp);
3456 recallp = NULL;
3457 wakeup(clp);
3458 NFSCL_DEBUG(4,
3459 "aft layrcal=%d\n",
3460 error);
3461 } else
3462 error =
3463 NFSERR_NOMATCHLAYOUT;
3464 } else
3465 error = NFSERR_NOMATCHLAYOUT;
3466 NFSUNLOCKCLSTATE();
3467 }
3468 free(nfhp, M_NFSFH);
3469 } else if (recalltype == NFSLAYOUTRETURN_FSID) {
3470 NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_HYPER);
3471 filesid[0] = fxdr_hyper(tl); tl += 2;
3472 filesid[1] = fxdr_hyper(tl); tl += 2;
3473 gotone = 0;
3474 NFSLOCKCLSTATE();
3475 clp = nfscl_getclntsess(sessionid);
3476 if (clp != NULL) {
3477 TAILQ_FOREACH(lyp, &clp->nfsc_layout,
3478 nfsly_list) {
3479 if (lyp->nfsly_filesid[0] ==
3480 filesid[0] &&
3481 lyp->nfsly_filesid[1] ==
3482 filesid[1]) {
3483 error =
3484 nfscl_layoutrecall(
3485 recalltype,
3486 lyp, iomode, 0,
3487 UINT64_MAX,
3488 lyp->nfsly_stateid.seqid,
3489 recallp);
3490 recallp = NULL;
3491 gotone = 1;
3492 }
3493 }
3494 if (gotone != 0)
3495 wakeup(clp);
3496 else
3497 error = NFSERR_NOMATCHLAYOUT;
3498 } else
3499 error = NFSERR_NOMATCHLAYOUT;
3500 NFSUNLOCKCLSTATE();
3501 } else if (recalltype == NFSLAYOUTRETURN_ALL) {
3502 gotone = 0;
3503 NFSLOCKCLSTATE();
3504 clp = nfscl_getclntsess(sessionid);
3505 if (clp != NULL) {
3506 TAILQ_FOREACH(lyp, &clp->nfsc_layout,
3507 nfsly_list) {
3508 error = nfscl_layoutrecall(
3509 recalltype, lyp, iomode, 0,
3510 UINT64_MAX,
3511 lyp->nfsly_stateid.seqid,
3512 recallp);
3513 recallp = NULL;
3514 gotone = 1;
3515 }
3516 if (gotone != 0)
3517 wakeup(clp);
3518 else
3519 error = NFSERR_NOMATCHLAYOUT;
3520 } else
3521 error = NFSERR_NOMATCHLAYOUT;
3522 NFSUNLOCKCLSTATE();
3523 } else
3524 error = NFSERR_NOMATCHLAYOUT;
3525 if (recallp != NULL) {
3526 free(recallp, M_NFSLAYRECALL);
3527 recallp = NULL;
3528 }
3529 break;
3530 case NFSV4OP_CBSEQUENCE:
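			/*
			 * Parse the CB_SEQUENCE arguments and check this
			 * session's callback slot table for a cached reply
			 * to a retried callback.
			 */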
3531 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
3532 5 * NFSX_UNSIGNED);
3533 bcopy(tl, sessionid, NFSX_V4SESSIONID);
3534 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3535 seqid = fxdr_unsigned(uint32_t, *tl++);
3536 slotid = fxdr_unsigned(uint32_t, *tl++);
3537 highslot = fxdr_unsigned(uint32_t, *tl++);
3538 cachethis = *tl++;
3539 /* Throw away the referring call stuff. */
3540 clist = fxdr_unsigned(int, *tl);
3541 for (j = 0; j < clist; j++) {
3542 NFSM_DISSECT(tl, uint32_t *, NFSX_V4SESSIONID +
3543 NFSX_UNSIGNED);
3544 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3545 rcalls = fxdr_unsigned(int, *tl);
3546 for (k = 0; k < rcalls; k++) {
3547 NFSM_DISSECT(tl, uint32_t *,
3548 2 * NFSX_UNSIGNED);
3549 }
3550 }
3551 NFSLOCKCLSTATE();
3552 if (i == 0) {
3553 clp = nfscl_getclntsess(sessionid);
3554 if (clp == NULL)
3555 error = NFSERR_SERVERFAULT;
3556 } else
3557 error = NFSERR_SEQUENCEPOS;
3558 if (error == 0) {
3559 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3560 error = nfsv4_seqsession(seqid, slotid,
3561 highslot, tsep->nfsess_cbslots, &rep,
3562 tsep->nfsess_backslots);
3563 }
3564 NFSUNLOCKCLSTATE();
3565 if (error == 0 || error == NFSERR_REPLYFROMCACHE) {
3566 gotseq_ok = 1;
3567 if (rep != NULL) {
3568 /*
3569 * Handle a reply for a retried
3570 * callback. The reply will be
3571 * re-inserted in the session cache
3572 * by the nfsv4_seqsess_cacherep() call
3573 * after out:
3574 */
3575 KASSERT(error == NFSERR_REPLYFROMCACHE,
3576 ("cbsequence: non-NULL rep"));
3577 NFSCL_DEBUG(4, "Got cbretry\n");
3578 m_freem(nd->nd_mreq);
3579 nd->nd_mreq = rep;
3580 rep = NULL;
3581 goto out;
3582 }
3583 NFSM_BUILD(tl, uint32_t *,
3584 NFSX_V4SESSIONID + 4 * NFSX_UNSIGNED);
3585 bcopy(sessionid, tl, NFSX_V4SESSIONID);
3586 tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
3587 *tl++ = txdr_unsigned(seqid);
3588 *tl++ = txdr_unsigned(slotid);
3589 *tl++ = txdr_unsigned(NFSV4_CBSLOTS - 1);
3590 *tl = txdr_unsigned(NFSV4_CBSLOTS - 1);
3591 }
3592 break;
3593 default:
3594 if (i == 0 && minorvers == NFSV41_MINORVERSION)
3595 error = NFSERR_OPNOTINSESS;
3596 else {
3597 NFSCL_DEBUG(1, "unsupp callback %d\n", op);
3598 error = NFSERR_NOTSUPP;
3599 }
3600 break;
3601 };
3602 if (error) {
3603 if (error == EBADRPC || error == NFSERR_BADXDR) {
3604 nd->nd_repstat = NFSERR_BADXDR;
3605 } else {
3606 nd->nd_repstat = error;
3607 }
3608 error = 0;
3609 }
3610 retops++;
3611 if (nd->nd_repstat) {
3612 *repp = nfscl_errmap(nd, minorvers);
3613 break;
3614 } else
3615 *repp = 0; /* NFS4_OK */
3616 }
3617 nfsmout:
3618 if (recallp != NULL)
3619 free(recallp, M_NFSLAYRECALL);
3620 if (error) {
3621 if (error == EBADRPC || error == NFSERR_BADXDR)
3622 nd->nd_repstat = NFSERR_BADXDR;
3623 else
3624 printf("nfsv4 comperr1=%d\n", error);
3625 }
3626 if (taglen == -1) {
3627 NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
3628 *tl++ = 0;
3629 *tl = 0;
3630 } else {
3631 *retopsp = txdr_unsigned(retops);
3632 }
3633 *nd->nd_errp = nfscl_errmap(nd, minorvers);
3634 out:
3635 if (gotseq_ok != 0) {
3636 rep = m_copym(nd->nd_mreq, 0, M_COPYALL, M_WAITOK);
3637 NFSLOCKCLSTATE();
3638 clp = nfscl_getclntsess(sessionid);
3639 if (clp != NULL) {
3640 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3641 nfsv4_seqsess_cacherep(slotid, tsep->nfsess_cbslots,
3642 NFSERR_OK, &rep);
3643 NFSUNLOCKCLSTATE();
3644 } else {
3645 NFSUNLOCKCLSTATE();
3646 m_freem(rep);
3647 }
3648 }
3649 }
3650
3651 /*
 * Generate the next cbident value. Basically just increment a static value
 * and, once it has wrapped around, check that it isn't already in the list.
3654 */
3655 static u_int32_t
3656 nfscl_nextcbident(void)
3657 {
3658 struct nfsclclient *clp;
3659 int matched;
3660 static u_int32_t nextcbident = 0;
3661 static int haswrapped = 0;
3662
3663 nextcbident++;
3664 if (nextcbident == 0)
3665 haswrapped = 1;
3666 if (haswrapped) {
3667 /*
3668 * Search the clientid list for one already using this cbident.
3669 */
3670 do {
3671 matched = 0;
3672 NFSLOCKCLSTATE();
3673 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3674 if (clp->nfsc_cbident == nextcbident) {
3675 matched = 1;
3676 break;
3677 }
3678 }
3679 NFSUNLOCKCLSTATE();
3680 if (matched == 1)
3681 nextcbident++;
3682 } while (matched);
3683 }
3684 return (nextcbident);
3685 }
3686
3687 /*
3688 * Get the mount point related to a given cbident or session and busy it.
3689 */
3690 static mount_t
3691 nfscl_getmnt(int minorvers, uint8_t *sessionid, u_int32_t cbident,
3692 struct nfsclclient **clpp)
3693 {
3694 struct nfsclclient *clp;
3695 mount_t mp;
3696 int error;
3697 struct nfsclsession *tsep;
3698
3699 *clpp = NULL;
3700 NFSLOCKCLSTATE();
3701 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3702 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3703 if (minorvers == NFSV4_MINORVERSION) {
3704 if (clp->nfsc_cbident == cbident)
3705 break;
3706 } else if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
3707 NFSX_V4SESSIONID))
3708 break;
3709 }
3710 if (clp == NULL) {
3711 NFSUNLOCKCLSTATE();
3712 return (NULL);
3713 }
3714 mp = clp->nfsc_nmp->nm_mountp;
3715 vfs_ref(mp);
3716 NFSUNLOCKCLSTATE();
3717 error = vfs_busy(mp, 0);
3718 vfs_rel(mp);
3719 if (error != 0)
3720 return (NULL);
3721 *clpp = clp;
3722 return (mp);
3723 }
3724
3725 /*
3726 * Get the clientid pointer related to a given cbident.
3727 */
3728 static struct nfsclclient *
3729 nfscl_getclnt(u_int32_t cbident)
3730 {
3731 struct nfsclclient *clp;
3732
3733 LIST_FOREACH(clp, &nfsclhead, nfsc_list)
3734 if (clp->nfsc_cbident == cbident)
3735 break;
3736 return (clp);
3737 }
3738
3739 /*
3740 * Get the clientid pointer related to a given sessionid.
3741 */
3742 static struct nfsclclient *
3743 nfscl_getclntsess(uint8_t *sessionid)
3744 {
3745 struct nfsclclient *clp;
3746 struct nfsclsession *tsep;
3747
3748 LIST_FOREACH(clp, &nfsclhead, nfsc_list) {
3749 tsep = nfsmnt_mdssession(clp->nfsc_nmp);
3750 if (!NFSBCMP(tsep->nfsess_sessionid, sessionid,
3751 NFSX_V4SESSIONID))
3752 break;
3753 }
3754 return (clp);
3755 }
3756
3757 /*
3758 * Search for a lock conflict locally on the client. A conflict occurs if
3759 * - the owners differ, the byte ranges overlap and at least one of the
3760 * locks is a write lock or this is an unlock.
3761 */
3762 static int
3763 nfscl_localconflict(struct nfsclclient *clp, u_int8_t *fhp, int fhlen,
3764 struct nfscllock *nlop, u_int8_t *own, struct nfscldeleg *dp,
3765 struct nfscllock **lopp)
3766 {
3767 struct nfsclowner *owp;
3768 struct nfsclopen *op;
3769 int ret;
3770
3771 if (dp != NULL) {
3772 ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp);
3773 if (ret)
3774 return (ret);
3775 }
3776 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3777 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
3778 if (op->nfso_fhlen == fhlen &&
3779 !NFSBCMP(op->nfso_fh, fhp, fhlen)) {
3780 ret = nfscl_checkconflict(&op->nfso_lock, nlop,
3781 own, lopp);
3782 if (ret)
3783 return (ret);
3784 }
3785 }
3786 }
3787 return (0);
3788 }
3789
3790 static int
3791 nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop,
3792 u_int8_t *own, struct nfscllock **lopp)
3793 {
3794 struct nfscllockowner *lp;
3795 struct nfscllock *lop;
3796
3797 LIST_FOREACH(lp, lhp, nfsl_list) {
3798 if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
3799 LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
3800 if (lop->nfslo_first >= nlop->nfslo_end)
3801 break;
3802 if (lop->nfslo_end <= nlop->nfslo_first)
3803 continue;
3804 if (lop->nfslo_type == F_WRLCK ||
3805 nlop->nfslo_type == F_WRLCK ||
3806 nlop->nfslo_type == F_UNLCK) {
3807 if (lopp != NULL)
3808 *lopp = lop;
3809 return (NFSERR_DENIED);
3810 }
3811 }
3812 }
3813 }
3814 return (0);
3815 }
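
/*
* Note (illustrative example, values made up): the two early tests
* above form a half-open interval overlap check. A held write lock on
* [100, 200) conflicts with a request for [150, 250), since 100 < 250
* and 150 < 200, so NFSERR_DENIED is returned; a request for [200, 300)
* does not overlap and the scan continues.
*/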
3816
3817 /*
3818 * Check for a local conflicting lock.
3819 */
3820 APPLESTATIC int
3821 nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off,
3822 u_int64_t len, struct flock *fl, NFSPROC_T *p, void *id, int flags)
3823 {
3824 struct nfscllock *lop, nlck;
3825 struct nfscldeleg *dp;
3826 struct nfsnode *np;
3827 u_int8_t own[NFSV4CL_LOCKNAMELEN];
3828 int error;
3829
3830 nlck.nfslo_type = fl->l_type;
3831 nlck.nfslo_first = off;
3832 if (len == NFS64BITSSET) {
3833 nlck.nfslo_end = NFS64BITSSET;
3834 } else {
3835 nlck.nfslo_end = off + len;
3836 if (nlck.nfslo_end <= nlck.nfslo_first)
3837 return (NFSERR_INVAL);
3838 }
3839 np = VTONFS(vp);
3840 nfscl_filllockowner(id, own, flags);
3841 NFSLOCKCLSTATE();
3842 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
3843 error = nfscl_localconflict(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len,
3844 &nlck, own, dp, &lop);
3845 if (error != 0) {
3846 fl->l_whence = SEEK_SET;
3847 fl->l_start = lop->nfslo_first;
3848 if (lop->nfslo_end == NFS64BITSSET)
3849 fl->l_len = 0;
3850 else
3851 fl->l_len = lop->nfslo_end - lop->nfslo_first;
3852 fl->l_pid = (pid_t)0;
3853 fl->l_type = lop->nfslo_type;
3854 error = -1; /* no RPC required */
3855 } else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) ||
3856 fl->l_type == F_RDLCK)) {
3857 /*
3858 * The delegation ensures that there isn't a conflicting
3859 * lock on the server, so return -1 to indicate an RPC
3860 * isn't required.
3861 */
3862 fl->l_type = F_UNLCK;
3863 error = -1;
3864 }
3865 NFSUNLOCKCLSTATE();
3866 return (error);
3867 }
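
/*
* Note (hedged summary): a return of -1 from nfscl_lockt() tells the
* caller that the answer was determined locally, either because a
* conflicting lock was found and the flock structure filled in, or
* because a delegation guarantees no conflict exists on the server;
* a return of 0 means a LockT RPC is still required.
*/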
3868
3869 /*
3870 * Handle Recall of a delegation.
3871 * The clp must be exclusive locked when this is called.
3872 */
3873 static int
3874 nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp,
3875 struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p,
3876 int called_from_renewthread)
3877 {
3878 struct nfsclowner *owp, *lowp, *nowp;
3879 struct nfsclopen *op, *lop;
3880 struct nfscllockowner *lp;
3881 struct nfscllock *lckp;
3882 struct nfsnode *np;
3883 int error = 0, ret, gotvp = 0;
3884
3885 if (vp == NULL) {
3886 /*
3887 * First, get a vnode for the file. This is needed to do RPCs.
3888 */
3889 ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh,
3890 dp->nfsdl_fhlen, p, &np);
3891 if (ret) {
3892 /*
3893 * File isn't open, so nothing to move over to the
3894 * server.
3895 */
3896 return (0);
3897 }
3898 vp = NFSTOV(np);
3899 gotvp = 1;
3900 } else {
3901 np = VTONFS(vp);
3902 }
3903 dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET;
3904
3905 /*
3906 * Ok, if it's a write delegation, flush data to the server, so
3907 * that close/open consistency is retained.
3908 */
3909 ret = 0;
3910 NFSLOCKNODE(np);
3911 if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) {
3912 np->n_flag |= NDELEGRECALL;
3913 NFSUNLOCKNODE(np);
3914 ret = ncl_flush(vp, MNT_WAIT, p, 1, called_from_renewthread);
3915 NFSLOCKNODE(np);
3916 np->n_flag &= ~NDELEGRECALL;
3917 }
3918 NFSINVALATTRCACHE(np);
3919 NFSUNLOCKNODE(np);
3920 if (ret == EIO && called_from_renewthread != 0) {
3921 /*
3922 * If the flush failed with EIO for the renew thread,
3923 * return now, so that the dirty buffer will be flushed
3924 * later.
3925 */
3926 if (gotvp != 0)
3927 vrele(vp);
3928 return (ret);
3929 }
3930
3931 /*
3932 * Now, for each openowner with opens issued locally, move them
3933 * over to state against the server.
3934 */
3935 LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) {
3936 lop = LIST_FIRST(&lowp->nfsow_open);
3937 if (lop != NULL) {
3938 if (LIST_NEXT(lop, nfso_list) != NULL)
3939 panic("nfsdlg mult opens");
3940 /*
3941 * Look for the same openowner against the server.
3942 */
3943 LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
3944 if (!NFSBCMP(lowp->nfsow_owner,
3945 owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) {
3946 newnfs_copycred(&dp->nfsdl_cred, cred);
3947 ret = nfscl_moveopen(vp, clp, nmp, lop,
3948 owp, dp, cred, p);
3949 if (ret == NFSERR_STALECLIENTID ||
3950 ret == NFSERR_STALEDONTRECOVER ||
3951 ret == NFSERR_BADSESSION) {
3952 if (gotvp)
3953 vrele(vp);
3954 return (ret);
3955 }
3956 if (ret) {
3957 nfscl_freeopen(lop, 1);
3958 if (!error)
3959 error = ret;
3960 }
3961 break;
3962 }
3963 }
3964
3965 /*
3966 * If no openowner found, create one and get an open
3967 * for it.
3968 */
3969 if (owp == NULL) {
3970 MALLOC(nowp, struct nfsclowner *,
3971 sizeof (struct nfsclowner), M_NFSCLOWNER,
3972 M_WAITOK);
3973 nfscl_newopen(clp, NULL, &owp, &nowp, &op,
3974 NULL, lowp->nfsow_owner, dp->nfsdl_fh,
3975 dp->nfsdl_fhlen, NULL, NULL);
3976 newnfs_copycred(&dp->nfsdl_cred, cred);
3977 ret = nfscl_moveopen(vp, clp, nmp, lop,
3978 owp, dp, cred, p);
3979 if (ret) {
3980 nfscl_freeopenowner(owp, 0);
3981 if (ret == NFSERR_STALECLIENTID ||
3982 ret == NFSERR_STALEDONTRECOVER ||
3983 ret == NFSERR_BADSESSION) {
3984 if (gotvp)
3985 vrele(vp);
3986 return (ret);
3987 }
3988 if (ret) {
3989 nfscl_freeopen(lop, 1);
3990 if (!error)
3991 error = ret;
3992 }
3993 }
3994 }
3995 }
3996 }
3997
3998 /*
3999 * Now, get byte range locks for any locks done locally.
4000 */
4001 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4002 LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) {
4003 newnfs_copycred(&dp->nfsdl_cred, cred);
4004 ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p);
4005 if (ret == NFSERR_STALESTATEID ||
4006 ret == NFSERR_STALEDONTRECOVER ||
4007 ret == NFSERR_STALECLIENTID ||
4008 ret == NFSERR_BADSESSION) {
4009 if (gotvp)
4010 vrele(vp);
4011 return (ret);
4012 }
4013 if (ret && !error)
4014 error = ret;
4015 }
4016 }
4017 if (gotvp)
4018 vrele(vp);
4019 return (error);
4020 }
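
/*
* Note (hedged summary of the function above): a delegation recall is
* handled in three steps: dirty data is flushed for a write delegation
* so that close/open consistency is kept, each locally issued open is
* moved over to server state via nfscl_moveopen() and, finally, locally
* issued byte range locks are re-acquired against the server via
* nfscl_relock().
*/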
4021
4022 /*
4023 * Move a locally issued open over to an owner on the state list.
4024 * SIDE EFFECT: If it needs to sleep (to do an RPC), it unlocks clstate and
4025 * returns with it unlocked.
4026 */
4027 static int
4028 nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
4029 struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp,
4030 struct ucred *cred, NFSPROC_T *p)
4031 {
4032 struct nfsclopen *op, *nop;
4033 struct nfscldeleg *ndp;
4034 struct nfsnode *np;
4035 int error = 0, newone;
4036
4037 /*
4038 * First, look for an appropriate open. If found, just increment the
4039 * opencnt in it.
4040 */
4041 LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
4042 if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode &&
4043 op->nfso_fhlen == lop->nfso_fhlen &&
4044 !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) {
4045 op->nfso_opencnt += lop->nfso_opencnt;
4046 nfscl_freeopen(lop, 1);
4047 return (0);
4048 }
4049 }
4050
4051 /* No appropriate open, so we have to do one against the server. */
4052 np = VTONFS(vp);
4053 MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) +
4054 lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK);
4055 newone = 0;
4056 nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner,
4057 lop->nfso_fh, lop->nfso_fhlen, cred, &newone);
4058 ndp = dp;
4059 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen,
4060 lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op,
4061 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p);
4062 if (error) {
4063 if (newone)
4064 nfscl_freeopen(op, 0);
4065 } else {
4066 op->nfso_mode |= lop->nfso_mode;
4067 op->nfso_opencnt += lop->nfso_opencnt;
4068 nfscl_freeopen(lop, 1);
4069 }
4070 if (nop != NULL)
4071 FREE((caddr_t)nop, M_NFSCLOPEN);
4072 if (ndp != NULL) {
4073 /*
4074 * What should I do with the returned delegation, since the
4075 * delegation is being recalled? For now, just printf and
4076 * throw it away.
4077 */
4078 printf("Moveopen returned deleg\n");
4079 FREE((caddr_t)ndp, M_NFSCLDELEG);
4080 }
4081 return (error);
4082 }
4083
4084 /*
4085 * Recall all delegations on this client.
4086 */
4087 static void
4088 nfscl_totalrecall(struct nfsclclient *clp)
4089 {
4090 struct nfscldeleg *dp;
4091
4092 TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
4093 if ((dp->nfsdl_flags & NFSCLDL_DELEGRET) == 0)
4094 dp->nfsdl_flags |= NFSCLDL_RECALL;
4095 }
4096 }
4097
4098 /*
4099 * Relock byte ranges. Called for delegation recall and state expiry.
4100 */
4101 static int
4102 nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp,
4103 struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred,
4104 NFSPROC_T *p)
4105 {
4106 struct nfscllockowner *nlp;
4107 struct nfsfh *nfhp;
4108 u_int64_t off, len;
4109 u_int32_t clidrev = 0;
4110 int error, newone, donelocally;
4111
4112 off = lop->nfslo_first;
4113 len = lop->nfslo_end - lop->nfslo_first;
4114 error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p,
4115 clp, 1, NULL, lp->nfsl_lockflags, lp->nfsl_owner,
4116 lp->nfsl_openowner, &nlp, &newone, &donelocally);
4117 if (error || donelocally)
4118 return (error);
4119 if (nmp->nm_clp != NULL)
4120 clidrev = nmp->nm_clp->nfsc_clientidrev;
4121 else
4122 clidrev = 0;
4123 nfhp = VTONFS(vp)->n_fhp;
4124 error = nfscl_trylock(nmp, vp, nfhp->nfh_fh,
4125 nfhp->nfh_len, nlp, newone, 0, off,
4126 len, lop->nfslo_type, cred, p);
4127 if (error)
4128 nfscl_freelockowner(nlp, 0);
4129 return (error);
4130 }
4131
4132 /*
4133 * Called to re-open a file. Basically get a vnode for the file handle
4134 * and then call nfsrpc_openrpc() to do the rest.
4135 */
4136 static int
4137 nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen,
4138 u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp,
4139 struct ucred *cred, NFSPROC_T *p)
4140 {
4141 struct nfsnode *np;
4142 vnode_t vp;
4143 int error;
4144
4145 error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np);
4146 if (error)
4147 return (error);
4148 vp = NFSTOV(np);
4149 if (np->n_v4 != NULL) {
4150 error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data,
4151 np->n_v4->n4_fhlen, fhp, fhlen, mode, op,
4152 NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0,
4153 cred, p);
4154 } else {
4155 error = EINVAL;
4156 }
4157 vrele(vp);
4158 return (error);
4159 }
4160
4161 /*
4162 * Try an open against the server. Just call nfsrpc_openrpc(), retrying
4163 * while the server returns NFSERR_DELAY. Also, try system credentials if
4164 * the passed-in credentials fail.
4165 */
4166 static int
4167 nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
4168 u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op,
4169 u_int8_t *name, int namelen, struct nfscldeleg **ndpp,
4170 int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p)
4171 {
4172 int error;
4173
4174 do {
4175 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen,
4176 mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p,
4177 0, 0);
4178 if (error == NFSERR_DELAY)
4179 (void) nfs_catnap(PZERO, error, "nfstryop");
4180 } while (error == NFSERR_DELAY);
4181 if (error == EAUTH || error == EACCES) {
4182 /* Try again using system credentials */
4183 newnfs_setroot(cred);
4184 do {
4185 error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp,
4186 newfhlen, mode, op, name, namelen, ndpp, reclaim,
4187 delegtype, cred, p, 1, 0);
4188 if (error == NFSERR_DELAY)
4189 (void) nfs_catnap(PZERO, error, "nfstryop");
4190 } while (error == NFSERR_DELAY);
4191 }
4192 return (error);
4193 }
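
/*
* Note: nfscl_trylock(), nfscl_trydelegreturn() and nfscl_tryclose()
* below all follow the same pattern as nfscl_tryopen() above: loop
* while the server replies NFSERR_DELAY, napping via nfs_catnap()
* between attempts, then repeat the exchange once with system
* credentials if the caller's credentials failed with EAUTH/EACCES.
*/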
4194
4195 /*
4196 * Try a byte range lock. Just loop on nfsrpc_lock() while it returns
4197 * NFSERR_DELAY. Also, retry with system credentials if the provided
4198 * credentials don't work.
4199 */
4200 static int
4201 nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp,
4202 int fhlen, struct nfscllockowner *nlp, int newone, int reclaim,
4203 u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p)
4204 {
4205 struct nfsrv_descript nfsd, *nd = &nfsd;
4206 int error;
4207
4208 do {
4209 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone,
4210 reclaim, off, len, type, cred, p, 0);
4211 if (!error && nd->nd_repstat == NFSERR_DELAY)
4212 (void) nfs_catnap(PZERO, (int)nd->nd_repstat,
4213 "nfstrylck");
4214 } while (!error && nd->nd_repstat == NFSERR_DELAY);
4215 if (!error)
4216 error = nd->nd_repstat;
4217 if (error == EAUTH || error == EACCES) {
4218 /* Try again using root credentials */
4219 newnfs_setroot(cred);
4220 do {
4221 error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp,
4222 newone, reclaim, off, len, type, cred, p, 1);
4223 if (!error && nd->nd_repstat == NFSERR_DELAY)
4224 (void) nfs_catnap(PZERO, (int)nd->nd_repstat,
4225 "nfstrylck");
4226 } while (!error && nd->nd_repstat == NFSERR_DELAY);
4227 if (!error)
4228 error = nd->nd_repstat;
4229 }
4230 return (error);
4231 }
4232
4233 /*
4234 * Try a delegreturn against the server. Just call nfsrpc_delegreturn(),
4235 * retrying while the server returns NFSERR_DELAY. Also, try system
4236 * credentials if the passed-in credentials fail.
4237 */
4238 static int
4239 nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred,
4240 struct nfsmount *nmp, NFSPROC_T *p)
4241 {
4242 int error;
4243
4244 do {
4245 error = nfsrpc_delegreturn(dp, cred, nmp, p, 0);
4246 if (error == NFSERR_DELAY)
4247 (void) nfs_catnap(PZERO, error, "nfstrydp");
4248 } while (error == NFSERR_DELAY);
4249 if (error == EAUTH || error == EACCES) {
4250 /* Try again using system credentials */
4251 newnfs_setroot(cred);
4252 do {
4253 error = nfsrpc_delegreturn(dp, cred, nmp, p, 1);
4254 if (error == NFSERR_DELAY)
4255 (void) nfs_catnap(PZERO, error, "nfstrydp");
4256 } while (error == NFSERR_DELAY);
4257 }
4258 return (error);
4259 }
4260
4261 /*
4262 * Try a close against the server. Just call nfsrpc_closerpc(),
4263 * retrying while the server returns NFSERR_DELAY. Also, try system
4264 * credentials if the passed-in credentials fail.
4265 */
4266 APPLESTATIC int
4267 nfscl_tryclose(struct nfsclopen *op, struct ucred *cred,
4268 struct nfsmount *nmp, NFSPROC_T *p)
4269 {
4270 struct nfsrv_descript nfsd, *nd = &nfsd;
4271 int error;
4272
4273 do {
4274 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0);
4275 if (error == NFSERR_DELAY)
4276 (void) nfs_catnap(PZERO, error, "nfstrycl");
4277 } while (error == NFSERR_DELAY);
4278 if (error == EAUTH || error == EACCES) {
4279 /* Try again using system credentials */
4280 newnfs_setroot(cred);
4281 do {
4282 error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1);
4283 if (error == NFSERR_DELAY)
4284 (void) nfs_catnap(PZERO, error, "nfstrycl");
4285 } while (error == NFSERR_DELAY);
4286 }
4287 return (error);
4288 }
4289
4290 /*
4291 * Decide if a delegation on a file permits close without flushing writes
4292 * to the server. This might be a big performance win in some environments.
4293 * (Not useful until the client does caching on local stable storage.)
4294 */
4295 APPLESTATIC int
4296 nfscl_mustflush(vnode_t vp)
4297 {
4298 struct nfsclclient *clp;
4299 struct nfscldeleg *dp;
4300 struct nfsnode *np;
4301 struct nfsmount *nmp;
4302
4303 np = VTONFS(vp);
4304 nmp = VFSTONFS(vnode_mount(vp));
4305 if (!NFSHASNFSV4(nmp))
4306 return (1);
4307 NFSLOCKCLSTATE();
4308 clp = nfscl_findcl(nmp);
4309 if (clp == NULL) {
4310 NFSUNLOCKCLSTATE();
4311 return (1);
4312 }
4313 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4314 if (dp != NULL && (dp->nfsdl_flags &
4315 (NFSCLDL_WRITE | NFSCLDL_RECALL | NFSCLDL_DELEGRET)) ==
4316 NFSCLDL_WRITE &&
4317 (dp->nfsdl_sizelimit >= np->n_size ||
4318 !NFSHASSTRICT3530(nmp))) {
4319 NFSUNLOCKCLSTATE();
4320 return (0);
4321 }
4322 NFSUNLOCKCLSTATE();
4323 return (1);
4324 }
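
/*
* Note (hedged): nfscl_mustflush() only returns 0 (no flush needed)
* when a write delegation is held that is neither being recalled nor
* returned and, for "strict RFC3530" mounts, the file size is within
* the delegation's space limit; all other cases, including non-NFSv4
* mounts, force the flush.
*/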
4325
4326 /*
4327 * See if a (write) delegation exists for this file.
4328 */
4329 APPLESTATIC int
4330 nfscl_nodeleg(vnode_t vp, int writedeleg)
4331 {
4332 struct nfsclclient *clp;
4333 struct nfscldeleg *dp;
4334 struct nfsnode *np;
4335 struct nfsmount *nmp;
4336
4337 np = VTONFS(vp);
4338 nmp = VFSTONFS(vnode_mount(vp));
4339 if (!NFSHASNFSV4(nmp))
4340 return (1);
4341 NFSLOCKCLSTATE();
4342 clp = nfscl_findcl(nmp);
4343 if (clp == NULL) {
4344 NFSUNLOCKCLSTATE();
4345 return (1);
4346 }
4347 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4348 if (dp != NULL &&
4349 (dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_DELEGRET)) == 0 &&
4350 (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) ==
4351 NFSCLDL_WRITE)) {
4352 NFSUNLOCKCLSTATE();
4353 return (0);
4354 }
4355 NFSUNLOCKCLSTATE();
4356 return (1);
4357 }
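
/*
* Note: this function shares the inverted convention of
* nfscl_mustflush() above: 0 means a usable delegation (a write
* delegation when writedeleg is set) exists, while 1 means the caller
* cannot rely on one.
*/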
4358
4359 /*
4360 * Look for an associated delegation that should be DelegReturned.
4361 */
4362 APPLESTATIC int
4363 nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp)
4364 {
4365 struct nfsclclient *clp;
4366 struct nfscldeleg *dp;
4367 struct nfsclowner *owp;
4368 struct nfscllockowner *lp;
4369 struct nfsmount *nmp;
4370 struct ucred *cred;
4371 struct nfsnode *np;
4372 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
4373
4374 nmp = VFSTONFS(vnode_mount(vp));
4375 np = VTONFS(vp);
4376 NFSLOCKCLSTATE();
4377 /*
4378 * Loop around waiting for:
4379 * - outstanding I/O operations on delegations to complete
4380 * - for a delegation on vp that has state, lock the client and
4381 * do a recall
4382 * - return delegation with no state
4383 */
4384 while (1) {
4385 clp = nfscl_findcl(nmp);
4386 if (clp == NULL) {
4387 NFSUNLOCKCLSTATE();
4388 return (retcnt);
4389 }
4390 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4391 np->n_fhp->nfh_len);
4392 if (dp != NULL) {
4393 /*
4394 * Wait for outstanding I/O ops to be done.
4395 */
4396 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4397 if (igotlock) {
4398 nfsv4_unlock(&clp->nfsc_lock, 0);
4399 igotlock = 0;
4400 }
4401 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4402 (void) nfsmsleep(&dp->nfsdl_rwlock,
4403 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
4404 continue;
4405 }
4406 needsrecall = 0;
4407 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4408 if (!LIST_EMPTY(&owp->nfsow_open)) {
4409 needsrecall = 1;
4410 break;
4411 }
4412 }
4413 if (!needsrecall) {
4414 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4415 if (!LIST_EMPTY(&lp->nfsl_lock)) {
4416 needsrecall = 1;
4417 break;
4418 }
4419 }
4420 }
4421 if (needsrecall && !triedrecall) {
4422 dp->nfsdl_flags |= NFSCLDL_DELEGRET;
4423 islept = 0;
4424 while (!igotlock) {
4425 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
4426 &islept, NFSCLSTATEMUTEXPTR, NULL);
4427 if (islept)
4428 break;
4429 }
4430 if (islept)
4431 continue;
4432 NFSUNLOCKCLSTATE();
4433 cred = newnfs_getcred();
4434 newnfs_copycred(&dp->nfsdl_cred, cred);
4435 (void) nfscl_recalldeleg(clp, nmp, dp, vp, cred, p, 0);
4436 NFSFREECRED(cred);
4437 triedrecall = 1;
4438 NFSLOCKCLSTATE();
4439 nfsv4_unlock(&clp->nfsc_lock, 0);
4440 igotlock = 0;
4441 continue;
4442 }
4443 *stp = dp->nfsdl_stateid;
4444 retcnt = 1;
4445 nfscl_cleandeleg(dp);
4446 nfscl_freedeleg(&clp->nfsc_deleg, dp);
4447 }
4448 if (igotlock)
4449 nfsv4_unlock(&clp->nfsc_lock, 0);
4450 NFSUNLOCKCLSTATE();
4451 return (retcnt);
4452 }
4453 }
4454
4455 /*
4456 * Look for associated delegation(s) that should be DelegReturned.
4457 */
4458 APPLESTATIC int
4459 nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp,
4460 nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p)
4461 {
4462 struct nfsclclient *clp;
4463 struct nfscldeleg *dp;
4464 struct nfsclowner *owp;
4465 struct nfscllockowner *lp;
4466 struct nfsmount *nmp;
4467 struct ucred *cred;
4468 struct nfsnode *np;
4469 int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept;
4470
4471 nmp = VFSTONFS(vnode_mount(fvp));
4472 *gotfdp = 0;
4473 *gottdp = 0;
4474 NFSLOCKCLSTATE();
4475 /*
4476 * Loop around waiting for:
4477 * - outstanding I/O operations on delegations to complete
4478 * - for a delegation on fvp that has state, lock the client and
4479 * do a recall
4480 * - return delegation(s) with no state.
4481 */
4482 while (1) {
4483 clp = nfscl_findcl(nmp);
4484 if (clp == NULL) {
4485 NFSUNLOCKCLSTATE();
4486 return (retcnt);
4487 }
4488 np = VTONFS(fvp);
4489 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4490 np->n_fhp->nfh_len);
4491 if (dp != NULL && *gotfdp == 0) {
4492 /*
4493 * Wait for outstanding I/O ops to be done.
4494 */
4495 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4496 if (igotlock) {
4497 nfsv4_unlock(&clp->nfsc_lock, 0);
4498 igotlock = 0;
4499 }
4500 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4501 (void) nfsmsleep(&dp->nfsdl_rwlock,
4502 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
4503 continue;
4504 }
4505 needsrecall = 0;
4506 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4507 if (!LIST_EMPTY(&owp->nfsow_open)) {
4508 needsrecall = 1;
4509 break;
4510 }
4511 }
4512 if (!needsrecall) {
4513 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4514 if (!LIST_EMPTY(&lp->nfsl_lock)) {
4515 needsrecall = 1;
4516 break;
4517 }
4518 }
4519 }
4520 if (needsrecall && !triedrecall) {
4521 dp->nfsdl_flags |= NFSCLDL_DELEGRET;
4522 islept = 0;
4523 while (!igotlock) {
4524 igotlock = nfsv4_lock(&clp->nfsc_lock, 1,
4525 &islept, NFSCLSTATEMUTEXPTR, NULL);
4526 if (islept)
4527 break;
4528 }
4529 if (islept)
4530 continue;
4531 NFSUNLOCKCLSTATE();
4532 cred = newnfs_getcred();
4533 newnfs_copycred(&dp->nfsdl_cred, cred);
4534 (void) nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p, 0);
4535 NFSFREECRED(cred);
4536 triedrecall = 1;
4537 NFSLOCKCLSTATE();
4538 nfsv4_unlock(&clp->nfsc_lock, 0);
4539 igotlock = 0;
4540 continue;
4541 }
4542 *fstp = dp->nfsdl_stateid;
4543 retcnt++;
4544 *gotfdp = 1;
4545 nfscl_cleandeleg(dp);
4546 nfscl_freedeleg(&clp->nfsc_deleg, dp);
4547 }
4548 if (igotlock) {
4549 nfsv4_unlock(&clp->nfsc_lock, 0);
4550 igotlock = 0;
4551 }
4552 if (tvp != NULL) {
4553 np = VTONFS(tvp);
4554 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh,
4555 np->n_fhp->nfh_len);
4556 if (dp != NULL && *gottdp == 0) {
4557 /*
4558 * Wait for outstanding I/O ops to be done.
4559 */
4560 if (dp->nfsdl_rwlock.nfslock_usecnt > 0) {
4561 dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED;
4562 (void) nfsmsleep(&dp->nfsdl_rwlock,
4563 NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL);
4564 continue;
4565 }
4566 LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) {
4567 if (!LIST_EMPTY(&owp->nfsow_open)) {
4568 NFSUNLOCKCLSTATE();
4569 return (retcnt);
4570 }
4571 }
4572 LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
4573 if (!LIST_EMPTY(&lp->nfsl_lock)) {
4574 NFSUNLOCKCLSTATE();
4575 return (retcnt);
4576 }
4577 }
4578 *tstp = dp->nfsdl_stateid;
4579 retcnt++;
4580 *gottdp = 1;
4581 nfscl_cleandeleg(dp);
4582 nfscl_freedeleg(&clp->nfsc_deleg, dp);
4583 }
4584 }
4585 NFSUNLOCKCLSTATE();
4586 return (retcnt);
4587 }
4588 }
4589
4590 /*
4591 * Get a reference on the clientid associated with the mount point.
4592 * Return 1 on success, 0 otherwise.
4593 */
4594 APPLESTATIC int
4595 nfscl_getref(struct nfsmount *nmp)
4596 {
4597 struct nfsclclient *clp;
4598
4599 NFSLOCKCLSTATE();
4600 clp = nfscl_findcl(nmp);
4601 if (clp == NULL) {
4602 NFSUNLOCKCLSTATE();
4603 return (0);
4604 }
4605 nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR, NULL);
4606 NFSUNLOCKCLSTATE();
4607 return (1);
4608 }
4609
4610 /*
4611 * Release a reference on a clientid acquired with the above call.
4612 */
4613 APPLESTATIC void
4614 nfscl_relref(struct nfsmount *nmp)
4615 {
4616 struct nfsclclient *clp;
4617
4618 NFSLOCKCLSTATE();
4619 clp = nfscl_findcl(nmp);
4620 if (clp == NULL) {
4621 NFSUNLOCKCLSTATE();
4622 return;
4623 }
4624 nfsv4_relref(&clp->nfsc_lock);
4625 NFSUNLOCKCLSTATE();
4626 }
4627
4628 /*
4629 * Save the size attribute in the delegation, since the nfsnode
4630 * is going away.
4631 */
4632 APPLESTATIC void
4633 nfscl_reclaimnode(vnode_t vp)
4634 {
4635 struct nfsclclient *clp;
4636 struct nfscldeleg *dp;
4637 struct nfsnode *np = VTONFS(vp);
4638 struct nfsmount *nmp;
4639
4640 nmp = VFSTONFS(vnode_mount(vp));
4641 if (!NFSHASNFSV4(nmp))
4642 return;
4643 NFSLOCKCLSTATE();
4644 clp = nfscl_findcl(nmp);
4645 if (clp == NULL) {
4646 NFSUNLOCKCLSTATE();
4647 return;
4648 }
4649 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4650 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
4651 dp->nfsdl_size = np->n_size;
4652 NFSUNLOCKCLSTATE();
4653 }
4654
4655 /*
4656 * Get the saved size attribute from the delegation, since this is a
4657 * newly allocated nfsnode.
4658 */
4659 APPLESTATIC void
4660 nfscl_newnode(vnode_t vp)
4661 {
4662 struct nfsclclient *clp;
4663 struct nfscldeleg *dp;
4664 struct nfsnode *np = VTONFS(vp);
4665 struct nfsmount *nmp;
4666
4667 nmp = VFSTONFS(vnode_mount(vp));
4668 if (!NFSHASNFSV4(nmp))
4669 return;
4670 NFSLOCKCLSTATE();
4671 clp = nfscl_findcl(nmp);
4672 if (clp == NULL) {
4673 NFSUNLOCKCLSTATE();
4674 return;
4675 }
4676 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4677 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE))
4678 np->n_size = dp->nfsdl_size;
4679 NFSUNLOCKCLSTATE();
4680 }
4681
4682 /*
4683 * If there is a valid write delegation for this file, set the modtime
4684 * to the local clock time.
4685 */
4686 APPLESTATIC void
4687 nfscl_delegmodtime(vnode_t vp)
4688 {
4689 struct nfsclclient *clp;
4690 struct nfscldeleg *dp;
4691 struct nfsnode *np = VTONFS(vp);
4692 struct nfsmount *nmp;
4693
4694 nmp = VFSTONFS(vnode_mount(vp));
4695 if (!NFSHASNFSV4(nmp))
4696 return;
4697 NFSLOCKCLSTATE();
4698 clp = nfscl_findcl(nmp);
4699 if (clp == NULL) {
4700 NFSUNLOCKCLSTATE();
4701 return;
4702 }
4703 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4704 if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) {
4705 nanotime(&dp->nfsdl_modtime);
4706 dp->nfsdl_flags |= NFSCLDL_MODTIMESET;
4707 }
4708 NFSUNLOCKCLSTATE();
4709 }
4710
4711 /*
4712 * If there is a valid write delegation for this file with a modtime set,
4713 * put that modtime in mtime.
4714 */
4715 APPLESTATIC void
4716 nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime)
4717 {
4718 struct nfsclclient *clp;
4719 struct nfscldeleg *dp;
4720 struct nfsnode *np = VTONFS(vp);
4721 struct nfsmount *nmp;
4722
4723 nmp = VFSTONFS(vnode_mount(vp));
4724 if (!NFSHASNFSV4(nmp))
4725 return;
4726 NFSLOCKCLSTATE();
4727 clp = nfscl_findcl(nmp);
4728 if (clp == NULL) {
4729 NFSUNLOCKCLSTATE();
4730 return;
4731 }
4732 dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
4733 if (dp != NULL &&
4734 (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) ==
4735 (NFSCLDL_WRITE | NFSCLDL_MODTIMESET))
4736 *mtime = dp->nfsdl_modtime;
4737 NFSUNLOCKCLSTATE();
4738 }
4739
4740 static int
4741 nfscl_errmap(struct nfsrv_descript *nd, u_int32_t minorvers)
4742 {
4743 short *defaulterrp, *errp;
4744
4745 if (!nd->nd_repstat)
4746 return (0);
4747 if (nd->nd_procnum == NFSPROC_NOOP)
4748 return (txdr_unsigned(nd->nd_repstat & 0xffff));
4749 if (nd->nd_repstat == EBADRPC)
4750 return (txdr_unsigned(NFSERR_BADXDR));
4751 if (nd->nd_repstat == NFSERR_MINORVERMISMATCH ||
4752 nd->nd_repstat == NFSERR_OPILLEGAL)
4753 return (txdr_unsigned(nd->nd_repstat));
4754 if (nd->nd_repstat >= NFSERR_BADIOMODE && nd->nd_repstat < 20000 &&
4755 minorvers > NFSV4_MINORVERSION) {
4756 /* NFSv4.n error. */
4757 return (txdr_unsigned(nd->nd_repstat));
4758 }
4759 if (nd->nd_procnum < NFSV4OP_CBNOPS)
4760 errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum];
4761 else
4762 return (txdr_unsigned(nd->nd_repstat));
4763 while (*++errp)
4764 if (*errp == (short)nd->nd_repstat)
4765 return (txdr_unsigned(nd->nd_repstat));
4766 return (txdr_unsigned(*defaulterrp));
4767 }
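
/*
* Note (my reading of the table lookup above): each nfscl_cberrmap[]
* row is a zero-terminated list of the errors a callback op is allowed
* to return, with the first entry doubling as the default; the scan
* starts at *++errp and falls back to *defaulterrp when nd_repstat is
* not in the list.
*/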
4768
4769 /*
4770 * Called to find/add a layout to a client.
4771 * This function returns the layout with a refcnt (shared lock) upon
4772 * success (returns 0) or with no lock/refcnt on the layout when an
4773 * error is returned.
4774 * If a layout is passed in via lypp, it is locked (exclusively locked).
4775 */
4776 APPLESTATIC int
4777 nfscl_layout(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen,
4778 nfsv4stateid_t *stateidp, int retonclose,
4779 struct nfsclflayouthead *fhlp, struct nfscllayout **lypp,
4780 struct ucred *cred, NFSPROC_T *p)
4781 {
4782 struct nfsclclient *clp;
4783 struct nfscllayout *lyp, *tlyp;
4784 struct nfsclflayout *flp;
4785 struct nfsnode *np = VTONFS(vp);
4786 mount_t mp;
4787 int layout_passed_in;
4788
4789 mp = nmp->nm_mountp;
4790 layout_passed_in = 1;
4791 tlyp = NULL;
4792 lyp = *lypp;
4793 if (lyp == NULL) {
4794 layout_passed_in = 0;
4795 tlyp = malloc(sizeof(*tlyp) + fhlen - 1, M_NFSLAYOUT,
4796 M_WAITOK | M_ZERO);
4797 }
4798
4799 NFSLOCKCLSTATE();
4800 clp = nmp->nm_clp;
4801 if (clp == NULL) {
4802 if (layout_passed_in != 0)
4803 nfsv4_unlock(&lyp->nfsly_lock, 0);
4804 NFSUNLOCKCLSTATE();
4805 if (tlyp != NULL)
4806 free(tlyp, M_NFSLAYOUT);
4807 return (EPERM);
4808 }
4809 if (lyp == NULL) {
4810 /*
4811 * Although no lyp was passed in, another thread might have
4812 * allocated one. If one is found, just increment its ref
4813 * count and return it.
4814 */
4815 lyp = nfscl_findlayout(clp, fhp, fhlen);
4816 if (lyp == NULL) {
4817 lyp = tlyp;
4818 tlyp = NULL;
4819 lyp->nfsly_stateid.seqid = stateidp->seqid;
4820 lyp->nfsly_stateid.other[0] = stateidp->other[0];
4821 lyp->nfsly_stateid.other[1] = stateidp->other[1];
4822 lyp->nfsly_stateid.other[2] = stateidp->other[2];
4823 lyp->nfsly_lastbyte = 0;
4824 LIST_INIT(&lyp->nfsly_flayread);
4825 LIST_INIT(&lyp->nfsly_flayrw);
4826 LIST_INIT(&lyp->nfsly_recall);
4827 lyp->nfsly_filesid[0] = np->n_vattr.na_filesid[0];
4828 lyp->nfsly_filesid[1] = np->n_vattr.na_filesid[1];
4829 lyp->nfsly_clp = clp;
4830 lyp->nfsly_flags = (retonclose != 0) ?
4831 (NFSLY_FILES | NFSLY_RETONCLOSE) : NFSLY_FILES;
4832 lyp->nfsly_fhlen = fhlen;
4833 NFSBCOPY(fhp, lyp->nfsly_fh, fhlen);
4834 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
4835 LIST_INSERT_HEAD(NFSCLLAYOUTHASH(clp, fhp, fhlen), lyp,
4836 nfsly_hash);
4837 lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
4838 nfscl_layoutcnt++;
4839 } else {
4840 if (retonclose != 0)
4841 lyp->nfsly_flags |= NFSLY_RETONCLOSE;
4842 TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
4843 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
4844 lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
4845 }
4846 nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
4847 if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
4848 NFSUNLOCKCLSTATE();
4849 if (tlyp != NULL)
4850 free(tlyp, M_NFSLAYOUT);
4851 return (EPERM);
4852 }
4853 *lypp = lyp;
4854 } else
4855 lyp->nfsly_stateid.seqid = stateidp->seqid;
4856
4857 /* Merge the new list of File Layouts into the list. */
4858 flp = LIST_FIRST(fhlp);
4859 if (flp != NULL) {
4860 if (flp->nfsfl_iomode == NFSLAYOUTIOMODE_READ)
4861 nfscl_mergeflayouts(&lyp->nfsly_flayread, fhlp);
4862 else
4863 nfscl_mergeflayouts(&lyp->nfsly_flayrw, fhlp);
4864 }
4865 if (layout_passed_in != 0)
4866 nfsv4_unlock(&lyp->nfsly_lock, 1);
4867 NFSUNLOCKCLSTATE();
4868 if (tlyp != NULL)
4869 free(tlyp, M_NFSLAYOUT);
4870 return (0);
4871 }
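
/*
* Note (hedged): the allocate-before-lock pattern above is deliberate:
* tlyp is malloc'd M_WAITOK before NFSLOCKCLSTATE(), since a sleeping
* allocation must not be done while holding the state mutex; if another
* thread won the race and inserted a layout first, the unused tlyp is
* simply freed on the way out.
*/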
4872
4873 /*
4874 * Search for a layout by MDS file handle.
4875 * If one is found, it is returned with a refcnt (shared lock) iff
4876 * retflpp is returned non-NULL and exclusively locked iff retflpp is
4877 * returned NULL.
4878 */
4879 struct nfscllayout *
4880 nfscl_getlayout(struct nfsclclient *clp, uint8_t *fhp, int fhlen,
4881 uint64_t off, struct nfsclflayout **retflpp, int *recalledp)
4882 {
4883 struct nfscllayout *lyp;
4884 mount_t mp;
4885 int error, igotlock;
4886
4887 mp = clp->nfsc_nmp->nm_mountp;
4888 *recalledp = 0;
4889 *retflpp = NULL;
4890 NFSLOCKCLSTATE();
4891 lyp = nfscl_findlayout(clp, fhp, fhlen);
4892 if (lyp != NULL) {
4893 if ((lyp->nfsly_flags & NFSLY_RECALL) == 0) {
4894 TAILQ_REMOVE(&clp->nfsc_layout, lyp, nfsly_list);
4895 TAILQ_INSERT_HEAD(&clp->nfsc_layout, lyp, nfsly_list);
4896 lyp->nfsly_timestamp = NFSD_MONOSEC + 120;
4897 error = nfscl_findlayoutforio(lyp, off,
4898 NFSV4OPEN_ACCESSREAD, retflpp);
4899 if (error == 0)
4900 nfsv4_getref(&lyp->nfsly_lock, NULL,
4901 NFSCLSTATEMUTEXPTR, mp);
4902 else {
4903 do {
4904 igotlock = nfsv4_lock(&lyp->nfsly_lock,
4905 1, NULL, NFSCLSTATEMUTEXPTR, mp);
4906 } while (igotlock == 0 &&
4907 (mp->mnt_kern_flag & MNTK_UNMOUNTF) == 0);
4908 *retflpp = NULL;
4909 }
4910 if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
4911 lyp = NULL;
4912 *recalledp = 1;
4913 }
4914 } else {
4915 lyp = NULL;
4916 *recalledp = 1;
4917 }
4918 }
4919 NFSUNLOCKCLSTATE();
4920 return (lyp);
4921 }
4922
4923 /*
4924 * Search for a layout by MDS file handle. If one is found, mark it to be
4925 * recalled, if it is already marked "return on close".
4926 */
4927 static void
4928 nfscl_retoncloselayout(vnode_t vp, struct nfsclclient *clp, uint8_t *fhp,
4929 int fhlen, struct nfsclrecalllayout **recallpp)
4930 {
4931 struct nfscllayout *lyp;
4932 uint32_t iomode;
4933
4934 if (vp->v_type != VREG || !NFSHASPNFS(VFSTONFS(vnode_mount(vp))) ||
4935 nfscl_enablecallb == 0 || nfs_numnfscbd == 0 ||
4936 (VTONFS(vp)->n_flag & NNOLAYOUT) != 0)
4937 return;
4938 lyp = nfscl_findlayout(clp, fhp, fhlen);
4939 if (lyp != NULL && (lyp->nfsly_flags & (NFSLY_RETONCLOSE |
4940 NFSLY_RECALL)) == NFSLY_RETONCLOSE) {
4941 iomode = 0;
4942 if (!LIST_EMPTY(&lyp->nfsly_flayread))
4943 iomode |= NFSLAYOUTIOMODE_READ;
4944 if (!LIST_EMPTY(&lyp->nfsly_flayrw))
4945 iomode |= NFSLAYOUTIOMODE_RW;
4946 (void)nfscl_layoutrecall(NFSLAYOUTRETURN_FILE, lyp, iomode,
4947 0, UINT64_MAX, lyp->nfsly_stateid.seqid, *recallpp);
4948 NFSCL_DEBUG(4, "retoncls recall iomode=%d\n", iomode);
4949 *recallpp = NULL;
4950 }
4951 }
4952
4953 /*
4954 * Dereference a layout.
4955 */
4956 void
4957 nfscl_rellayout(struct nfscllayout *lyp, int exclocked)
4958 {
4959
4960 NFSLOCKCLSTATE();
4961 if (exclocked != 0)
4962 nfsv4_unlock(&lyp->nfsly_lock, 0);
4963 else
4964 nfsv4_relref(&lyp->nfsly_lock);
4965 NFSUNLOCKCLSTATE();
4966 }
4967
4968 /*
4969 * Search for a devinfo by deviceid. If one is found, return it after
4970 * acquiring a reference count on it.
4971 */
4972 struct nfscldevinfo *
4973 nfscl_getdevinfo(struct nfsclclient *clp, uint8_t *deviceid,
4974 struct nfscldevinfo *dip)
4975 {
4976
4977 NFSLOCKCLSTATE();
4978 if (dip == NULL)
4979 dip = nfscl_finddevinfo(clp, deviceid);
4980 if (dip != NULL)
4981 dip->nfsdi_refcnt++;
4982 NFSUNLOCKCLSTATE();
4983 return (dip);
4984 }
4985
4986 /*
4987 * Dereference a devinfo structure.
4988 */
4989 static void
4990 nfscl_reldevinfo_locked(struct nfscldevinfo *dip)
4991 {
4992
4993 dip->nfsdi_refcnt--;
4994 if (dip->nfsdi_refcnt == 0)
4995 wakeup(&dip->nfsdi_refcnt);
4996 }
4997
4998 /*
4999 * Dereference a devinfo structure.
5000 */
5001 void
5002 nfscl_reldevinfo(struct nfscldevinfo *dip)
5003 {
5004
5005 NFSLOCKCLSTATE();
5006 nfscl_reldevinfo_locked(dip);
5007 NFSUNLOCKCLSTATE();
5008 }
5009
5010 /*
5011 * Find a layout for this file handle. Return NULL upon failure.
5012 */
5013 static struct nfscllayout *
5014 nfscl_findlayout(struct nfsclclient *clp, u_int8_t *fhp, int fhlen)
5015 {
5016 struct nfscllayout *lyp;
5017
5018 LIST_FOREACH(lyp, NFSCLLAYOUTHASH(clp, fhp, fhlen), nfsly_hash)
5019 if (lyp->nfsly_fhlen == fhlen &&
5020 !NFSBCMP(lyp->nfsly_fh, fhp, fhlen))
5021 break;
5022 return (lyp);
5023 }
5024
5025 /*
5026 * Find a devinfo for this deviceid. Return NULL upon failure.
5027 */
5028 static struct nfscldevinfo *
5029 nfscl_finddevinfo(struct nfsclclient *clp, uint8_t *deviceid)
5030 {
5031 struct nfscldevinfo *dip;
5032
5033 LIST_FOREACH(dip, &clp->nfsc_devinfo, nfsdi_list)
5034 if (NFSBCMP(dip->nfsdi_deviceid, deviceid, NFSX_V4DEVICEID)
5035 == 0)
5036 break;
5037 return (dip);
5038 }
5039
5040 /*
5041 * Merge the new file layout list into the main one, maintaining it in
5042 * increasing offset order.
5043 */
5044 static void
5045 nfscl_mergeflayouts(struct nfsclflayouthead *fhlp,
5046 struct nfsclflayouthead *newfhlp)
5047 {
5048 struct nfsclflayout *flp, *nflp, *prevflp, *tflp;
5049
5050 flp = LIST_FIRST(fhlp);
5051 prevflp = NULL;
5052 LIST_FOREACH_SAFE(nflp, newfhlp, nfsfl_list, tflp) {
5053 while (flp != NULL && flp->nfsfl_off < nflp->nfsfl_off) {
5054 prevflp = flp;
5055 flp = LIST_NEXT(flp, nfsfl_list);
5056 }
5057 if (prevflp == NULL)
5058 LIST_INSERT_HEAD(fhlp, nflp, nfsfl_list);
5059 else
5060 LIST_INSERT_AFTER(prevflp, nflp, nfsfl_list);
5061 prevflp = nflp;
5062 }
5063 }
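
/*
* Note (illustrative example, offsets made up): merging new segments at
* offsets 0 and 8192 into an existing list holding offset 4096 yields
* the order 0, 4096, 8192; a new segment whose offset equals an
* existing one is inserted ahead of it.
*/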
5064
5065 /*
5066 * Add this nfscldevinfo to the client, if it doesn't already exist.
5067 * This function consumes the structure pointed at by dip, if not NULL.
5068 */
5069 APPLESTATIC int
5070 nfscl_adddevinfo(struct nfsmount *nmp, struct nfscldevinfo *dip,
5071 struct nfsclflayout *flp)
5072 {
5073 struct nfsclclient *clp;
5074 struct nfscldevinfo *tdip;
5075
5076 NFSLOCKCLSTATE();
5077 clp = nmp->nm_clp;
5078 if (clp == NULL) {
5079 NFSUNLOCKCLSTATE();
5080 if (dip != NULL)
5081 free(dip, M_NFSDEVINFO);
5082 return (ENODEV);
5083 }
5084 tdip = nfscl_finddevinfo(clp, flp->nfsfl_dev);
5085 if (tdip != NULL) {
5086 tdip->nfsdi_layoutrefs++;
5087 flp->nfsfl_devp = tdip;
5088 nfscl_reldevinfo_locked(tdip);
5089 NFSUNLOCKCLSTATE();
5090 if (dip != NULL)
5091 free(dip, M_NFSDEVINFO);
5092 return (0);
5093 }
5094 if (dip != NULL) {
5095 LIST_INSERT_HEAD(&clp->nfsc_devinfo, dip, nfsdi_list);
5096 dip->nfsdi_layoutrefs = 1;
5097 flp->nfsfl_devp = dip;
5098 }
5099 NFSUNLOCKCLSTATE();
5100 if (dip == NULL)
5101 return (ENODEV);
5102 return (0);
5103 }
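
/*
* Note (hedged): ENODEV is returned when the deviceid is unknown and no
* dip was supplied to add; I assume the caller then fetches the device
* info from the server (GetDeviceInfo) and retries, but that behaviour
* lives in the callers, not here.
*/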
5104
5105 /*
5106 * Free up a layout structure and associated file layout structure(s).
5107 */
5108 APPLESTATIC void
5109 nfscl_freelayout(struct nfscllayout *layp)
5110 {
5111 struct nfsclflayout *flp, *nflp;
5112 struct nfsclrecalllayout *rp, *nrp;
5113
5114 LIST_FOREACH_SAFE(flp, &layp->nfsly_flayread, nfsfl_list, nflp) {
5115 LIST_REMOVE(flp, nfsfl_list);
5116 nfscl_freeflayout(flp);
5117 }
5118 LIST_FOREACH_SAFE(flp, &layp->nfsly_flayrw, nfsfl_list, nflp) {
5119 LIST_REMOVE(flp, nfsfl_list);
5120 nfscl_freeflayout(flp);
5121 }
5122 LIST_FOREACH_SAFE(rp, &layp->nfsly_recall, nfsrecly_list, nrp) {
5123 LIST_REMOVE(rp, nfsrecly_list);
5124 free(rp, M_NFSLAYRECALL);
5125 }
5126 nfscl_layoutcnt--;
5127 free(layp, M_NFSLAYOUT);
5128 }
5129
5130 /*
5131 * Free up a file layout structure.
5132 */
5133 APPLESTATIC void
5134 nfscl_freeflayout(struct nfsclflayout *flp)
5135 {
5136 int i;
5137
5138 for (i = 0; i < flp->nfsfl_fhcnt; i++)
5139 free(flp->nfsfl_fh[i], M_NFSFH);
5140 if (flp->nfsfl_devp != NULL)
5141 flp->nfsfl_devp->nfsdi_layoutrefs--;
5142 free(flp, M_NFSFLAYOUT);
5143 }
5144
5145 /*
5146 * Free up a file layout devinfo structure.
5147 */
5148 APPLESTATIC void
5149 nfscl_freedevinfo(struct nfscldevinfo *dip)
5150 {
5151
5152 free(dip, M_NFSDEVINFO);
5153 }
5154
5155 /*
5156 * Mark any layouts that match as recalled.
5157 */
5158 static int
5159 nfscl_layoutrecall(int recalltype, struct nfscllayout *lyp, uint32_t iomode,
5160 uint64_t off, uint64_t len, uint32_t stateseqid,
5161 struct nfsclrecalllayout *recallp)
5162 {
5163 struct nfsclrecalllayout *rp, *orp;
5164
5165 recallp->nfsrecly_recalltype = recalltype;
5166 recallp->nfsrecly_iomode = iomode;
5167 recallp->nfsrecly_stateseqid = stateseqid;
5168 recallp->nfsrecly_off = off;
5169 recallp->nfsrecly_len = len;
5170 /*
5171 * Order the list as file returns first, followed by fsid and any
5172 * returns, both in increasing stateseqid order.
5173 * Note that the seqids wrap around, so 1 is after 0xffffffff.
5174 * (I'm not sure this is correct because I find RFC5661 confusing
5175 * on this, but hopefully it will work ok.)
5176 */
5177 orp = NULL;
5178 LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
5179 orp = rp;
5180 if ((recalltype == NFSLAYOUTRETURN_FILE &&
5181 (rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE ||
5182 nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) ||
5183 (recalltype != NFSLAYOUTRETURN_FILE &&
5184 rp->nfsrecly_recalltype != NFSLAYOUTRETURN_FILE &&
5185 nfscl_seq(stateseqid, rp->nfsrecly_stateseqid) != 0)) {
5186 LIST_INSERT_BEFORE(rp, recallp, nfsrecly_list);
5187 break;
5188 }
5189 }
5190 if (rp == NULL) {
5191 if (orp == NULL)
5192 LIST_INSERT_HEAD(&lyp->nfsly_recall, recallp,
5193 nfsrecly_list);
5194 else
5195 LIST_INSERT_AFTER(orp, recallp, nfsrecly_list);
5196 }
5197 lyp->nfsly_flags |= NFSLY_RECALL;
5198 return (0);
5199 }
5200
5201 /*
5202 * Compare the two seqids for ordering. The trick is that the seqids can
5203 * wrap around from 0xffffffff->0, so check for the cases where one
5204 * has wrapped around.
5205 * Return 1 if seqid1 comes before seqid2, 0 otherwise.
5206 */
5207 static int
5208 nfscl_seq(uint32_t seqid1, uint32_t seqid2)
5209 {
5210
5211 if (seqid2 > seqid1 && (seqid2 - seqid1) >= 0x7fffffff)
5212 /* seqid2 has wrapped around. */
5213 return (0);
5214 if (seqid1 > seqid2 && (seqid1 - seqid2) >= 0x7fffffff)
5215 /* seqid1 has wrapped around. */
5216 return (1);
5217 if (seqid1 <= seqid2)
5218 return (1);
5219 return (0);
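
/*
* Note (worked example): nfscl_seq(0xfffffffe, 1) takes the second test
* (0xfffffffe - 1 = 0xfffffffd >= 0x7fffffff) and returns 1, so
* 0xfffffffe is ordered before the wrapped seqid 1; conversely,
* nfscl_seq(1, 0xfffffffe) takes the first test and returns 0.
*/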
5220 }
5221
5222 /*
5223 * Do a layout return for each of the recalls.
5224 */
5225 static void
5226 nfscl_layoutreturn(struct nfsmount *nmp, struct nfscllayout *lyp,
5227 struct ucred *cred, NFSPROC_T *p)
5228 {
5229 struct nfsclrecalllayout *rp;
5230 nfsv4stateid_t stateid;
5231
5232 NFSBCOPY(lyp->nfsly_stateid.other, stateid.other, NFSX_STATEIDOTHER);
5233 stateid.seqid = lyp->nfsly_stateid.seqid;
5234 LIST_FOREACH(rp, &lyp->nfsly_recall, nfsrecly_list) {
5235 (void)nfsrpc_layoutreturn(nmp, lyp->nfsly_fh,
5236 lyp->nfsly_fhlen, 0, NFSLAYOUT_NFSV4_1_FILES,
5237 rp->nfsrecly_iomode, rp->nfsrecly_recalltype,
5238 rp->nfsrecly_off, rp->nfsrecly_len,
5239 &stateid, 0, NULL, cred, p, NULL);
5240 }
5241 }
5242
5243 /*
5244 * Do the layout commit for a file layout.
5245 */
5246 static void
5247 nfscl_dolayoutcommit(struct nfsmount *nmp, struct nfscllayout *lyp,
5248 struct ucred *cred, NFSPROC_T *p)
5249 {
5250 struct nfsclflayout *flp;
5251 uint64_t len;
5252 int error;
5253
5254 LIST_FOREACH(flp, &lyp->nfsly_flayrw, nfsfl_list) {
5255 if (flp->nfsfl_off <= lyp->nfsly_lastbyte) {
5256 len = flp->nfsfl_end - flp->nfsfl_off;
5257 error = nfsrpc_layoutcommit(nmp, lyp->nfsly_fh,
5258 lyp->nfsly_fhlen, 0, flp->nfsfl_off, len,
5259 lyp->nfsly_lastbyte, &lyp->nfsly_stateid,
5260 NFSLAYOUT_NFSV4_1_FILES, 0, NULL, cred, p, NULL);
5261 NFSCL_DEBUG(4, "layoutcommit err=%d\n", error);
5262 if (error == NFSERR_NOTSUPP) {
5263 /* If not supported, don't bother doing it. */
5264 NFSLOCKMNT(nmp);
5265 nmp->nm_state |= NFSSTA_NOLAYOUTCOMMIT;
5266 NFSUNLOCKMNT(nmp);
5267 break;
5268 }
5269 }
5270 }
5271 }
5272
5273 /*
5274 * Commit all layouts for a file (vnode).
5275 */
5276 int
5277 nfscl_layoutcommit(vnode_t vp, NFSPROC_T *p)
5278 {
5279 struct nfsclclient *clp;
5280 struct nfscllayout *lyp;
5281 struct nfsnode *np = VTONFS(vp);
5282 mount_t mp;
5283 struct nfsmount *nmp;
5284
5285 mp = vnode_mount(vp);
5286 nmp = VFSTONFS(mp);
5287 if (NFSHASNOLAYOUTCOMMIT(nmp))
5288 return (0);
5289 NFSLOCKCLSTATE();
5290 clp = nmp->nm_clp;
5291 if (clp == NULL) {
5292 NFSUNLOCKCLSTATE();
5293 return (EPERM);
5294 }
5295 lyp = nfscl_findlayout(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
5296 if (lyp == NULL) {
5297 NFSUNLOCKCLSTATE();
5298 return (EPERM);
5299 }
5300 nfsv4_getref(&lyp->nfsly_lock, NULL, NFSCLSTATEMUTEXPTR, mp);
5301 if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
5302 NFSUNLOCKCLSTATE();
5303 return (EPERM);
5304 }
5305 tryagain:
5306 if ((lyp->nfsly_flags & NFSLY_WRITTEN) != 0) {
5307 lyp->nfsly_flags &= ~NFSLY_WRITTEN;
5308 NFSUNLOCKCLSTATE();
5309 NFSCL_DEBUG(4, "do layoutcommit2\n");
5310 nfscl_dolayoutcommit(clp->nfsc_nmp, lyp, NFSPROCCRED(p), p);
5311 NFSLOCKCLSTATE();
5312 goto tryagain;
5313 }
5314 nfsv4_relref(&lyp->nfsly_lock);
5315 NFSUNLOCKCLSTATE();
5316 return (0);
5317 }
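
/*
* Note (hedged): the tryagain loop above re-tests NFSLY_WRITTEN after
* every commit because the flag can be set again while the state mutex
* is dropped for the RPC; the reference acquired with nfsv4_getref()
* keeps lyp from going away across those unlocked windows.
*/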
5318