/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern int smbfs_pbuf_freecnt;

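/*
 * When smbfs_fastlookup is non-zero, smbfs_readvdir() also creates a
 * vnode for each entry it returns and primes the name cache with it,
 * so a subsequent lookup of that name can be served from the cache.
 */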
static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");

#define DE_SIZE	(sizeof(struct dirent))

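/*
 * Read directory entries as a stream of fixed-size dirent records.
 * Offsets 0 and 1 are the synthesized "." and ".." entries; the rest
 * come from an SMB wildcard search whose state is cached in n_dirseq.
 */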
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred *scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	scred = smbfs_malloc_scred();
	smb_makescred(scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0) {
		error = EINVAL;
		goto out;
	}
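	/* Synthesize the "." and ".." entries at directory offsets 0 and 1. */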
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? np->n_parentino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_off = offset + 1;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_type = DT_DIR;
		dirent_terminate(&de);
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			goto out;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0) {
		error = 0;
		goto out;
	}
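	/* Reopen the SMB search unless one is cached at the right offset. */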
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    scred, &ctx);
		if (error) {
			SMBVDEBUG("cannot open search, error = %d\n", error);
			goto out;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
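	/* Walk the search forward to the requested directory offset. */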
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
			error = (error == ENOENT) ? 0 : error;
			goto out;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_off = offset + 1;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		dirent_terminate(&de);
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
out:
	smbfs_free_scred(scred);
	return error;
}

int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred *scred;
	int error, lks;

	/*
	 * Protect against method which is not supported for now
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vnode types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
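	/*
	 * Validate cached data against the server: if we modified the file
	 * locally, just refresh the attributes; otherwise, if the server's
	 * modify time changed behind our back, discard our cached buffers.
	 */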
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	return (error);
}

int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred *scred;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vnode types other than VREG are unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
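	/*
	 * For append or synchronous writes, push out any locally modified
	 * buffers first; an append then starts at the current end of file.
	 */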
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error)
				return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;

	if (vn_rlimit_fsize(vp, uiop, td))
		return (EFBIG);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	SMBVDEBUG("after: ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
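	/*
	 * A successful write may have extended the file; keep the cached
	 * size and the VM pager's notion of the size in sync.
	 */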
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio *uiop;
	struct iovec io;
	struct smb_cred *scred;
	int error = 0;

	uiop = malloc(sizeof(struct uio), M_SMBFSDATA, M_WAITOK);
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cr);

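	/*
	 * b_blkno is expressed in DEV_BSIZE (512-byte) units; convert it
	 * to a byte offset for the SMB request.
	 */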
	if (bp->b_iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;
		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
			if (error)
				break;
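			/* Zero whatever part of the buffer a short read left unfilled. */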
			if (uiop->uio_resid) {
				int left = uiop->uio_resid;
				int nread = bp->b_bcount - left;
				if (left > 0)
					bzero((char *)bp->b_data + nread, left);
			}
			break;
		default:
			printf("smbfs_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_error = error;
			bp->b_ioflags |= BIO_ERROR;
		}
	} else { /* write */
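		/* Clamp the dirty region so we never write past the file size. */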
		if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
			bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid =
			    bp->b_dirtyend - bp->b_dirtyoff;
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE +
			    bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			error = smb_write(smp->sm_share, np->n_fid, uiop, scred);

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set BIO_ERROR and report the interruption
			 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  The remainder of this logic was inherited
			 * from the NFS client: there, a V3 write rpc that was
			 * not committed to stable storage left the block dirty,
			 * requiring a commit rpc or another write rpc with
			 * iomode == NFSV3WRITE_FILESYNC before the block could
			 * be reused, which was indicated by setting the
			 * B_DELWRI and B_NEEDCOMMIT flags.
			 */
			if (error == EINTR
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				if ((bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_error = error;
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			free(uiop, M_SMBFSDATA);
			smbfs_free_scred(scred);
			return 0;
		}
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	free(uiop, M_SMBFSDATA);
	smbfs_free_scred(scred);
	return error;
}

/*
 * Vnode op for VM getpages.
 * It would be nice to get rid of the multiple I/O routines.
 */
int
smbfs_getpages(
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int *a_rbehind;
		int *a_rahead;
	} */ *ap)
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_object_t object;
	vm_page_t *pages;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	npages = ap->a_count;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 *
	 * XXXGL: is that true for SMB filesystem?
	 */
	VM_OBJECT_WLOCK(object);
	if (pages[npages - 1]->valid != 0 && --npages == 0)
		goto out;
	VM_OBJECT_WUNLOCK(object);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);

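	/*
	 * Borrow a pager buffer and map the pages into its KVA so the
	 * SMB read can treat them as one contiguous system-space buffer.
	 */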
	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	VM_CNT_INC(v_vnodein);
	VM_CNT_ADD(v_vnodepgsin, npages);

	count = npages << PAGE_SHIFT;
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n", error);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

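	/* Mark each page valid to the extent the read actually filled it. */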
	VM_OBJECT_WLOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
	}
out:
	VM_OBJECT_WUNLOCK(object);
	if (ap->a_rbehind)
		*ap->a_rbehind = 0;
	if (ap->a_rahead)
		*ap->a_rahead = 0;
	return (VM_PAGER_OK);
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before the close, so it's
 * not necessary to open the vnode.
 */
int
smbfs_putpages(
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
	} */ *ap)
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_ERROR;
	}

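	/*
	 * As in smbfs_getpages(), map the pages into a pager buffer's KVA
	 * and push them out in a single synchronous SMB write.
	 */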
	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	VM_CNT_INC(v_vnodeout);
	VM_CNT_ADD(v_vnodepgsout, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uio.uio_offset,
	    uio.uio_resid);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (error == 0) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid,
		    npages * PAGE_SIZE, npages * PAGE_SIZE);
	}
	return (rtvals[0]);
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;

	if (vp->v_iflag & VI_DOOMED)
		return 0;

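	/*
	 * Only one flush may be in progress at a time; anyone else notes
	 * interest in NFLUSHWANT and sleeps, bailing out if interrupted
	 * by a signal.
	 */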
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		(void)tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", 2 * hz);
		error = smb_td_intr(td);
		if (error == EINTR)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
	}

	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}