/*-
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/8.4/sys/fs/smbfs/smbfs_io.c 225320 2011-09-02 07:37:55Z mm $
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern int smbfs_pbuf_freecnt;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");


#define DE_SIZE	(sizeof(struct dirent))

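/*
 * Fill the caller's uio with struct dirent entries for a directory read.
 * The first two directory slots are synthesized "." and ".." entries; the
 * remaining entries come from a server-side search context (np->n_dirseq)
 * that is kept open across calls and whose position is tracked by
 * np->n_dirofs.
 */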
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error/*, *eofflag = ap->a_eofflag*/;
	long offset, limit;

	np = VTOSMB(vp);
	SMBVDEBUG("dirname='%s'\n", np->n_name);
	smb_makescred(&scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0)
		return EINVAL;
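	/*
	 * Synthesize the "." and ".." entries for directory offsets 0 and 1.
	 * "." uses this node's inode number and ".." the parent's (or 2 for
	 * the root); a zero inode number is remapped to a fake non-zero one.
	 */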
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? VTOSMB(np->n_parent)->n_ino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_name[offset + 1] = '\0';
		de.d_type = DT_DIR;
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			return error;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0)
		return 0;
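	/*
	 * If the requested offset does not match our position in the open
	 * search context (or no context is open), close any stale context,
	 * start a new wildcard search, and then skip forward until
	 * n_dirofs catches up with the requested offset.
	 */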
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    &scred, &ctx);
		if (error) {
			SMBVDEBUG("can not open search, error = %d", error);
			return error;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, &scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
			return error == ENOENT ? 0 : error;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, &scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		de.d_name[de.d_namlen] = '\0';
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
	return error;
}

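/*
 * Read from a regular file or directory vnode.  Directory reads are handed
 * to smbfs_readvdir(); regular-file reads validate the attribute cache
 * (invalidating cached buffers if the server-side mtime changed) and then
 * issue an uncached smb_read() on the open file handle.
 */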
int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred scred;
	int error, lks;

	/*
	 * Protect against a method that is not supported for now.
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported !\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	smb_makescred(&scred, td, cred);
	return smb_read(smp->sm_share, np->n_fid, uiop, &scred);
}

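/*
 * Write to a regular file vnode.  IO_APPEND and IO_SYNC writes flush any
 * cached buffers first, the process file-size resource limit is enforced,
 * and the data is pushed with an uncached smb_write(); if the write grew
 * the file, the cached size and the VM object size are extended.
 */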
int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred scred;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG unsupported !\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error) return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;

	if (vn_rlimit_fsize(vp, uiop, td))
		return (EFBIG);

	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
	SMBVDEBUG("after: ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio uio, *uiop = &uio;
	struct iovec io;
	struct smb_cred scred;
	int error = 0;

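	/*
	 * Build a single-segment kernel-space uio describing the buffer's
	 * data so the SMB read/write primitives can transfer it directly.
	 */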
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	smb_makescred(&scred, td, cr);

	if (bp->b_iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;
		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			error = smb_read(smp->sm_share, np->n_fid, uiop, &scred);
			if (error)
				break;
			if (uiop->uio_resid) {
				int left = uiop->uio_resid;
				int nread = bp->b_bcount - left;
				if (left > 0)
					bzero((char *)bp->b_data + nread, left);
			}
			break;
		default:
			printf("smbfs_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_error = error;
			bp->b_ioflags |= BIO_ERROR;
		}
	} else { /* write */
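		/*
		 * Clip the dirty region so we never write past the cached
		 * file size; only push the buffer if a dirty range remains
		 * after clipping.
		 */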
		if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
			bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set BIO_ERROR and report the interruption
			 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop. For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused. This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 */
			if (error == EINTR
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				int s;

				s = splbio();
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				if ((bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
				splx(s);
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_error = error;
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			return 0;
		}
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	return error;
}

/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 */
int
smbfs_getpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count, reqpage;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_object_t object;
	vm_page_t *pages, m;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	npages = btoc(count);
	reqpage = ap->a_reqpage;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	m = pages[reqpage];

	VM_OBJECT_LOCK(object);
	if (m->valid != 0) {
		vm_page_lock_queues();
		for (i = 0; i < npages; ++i) {
			if (i != reqpage)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return 0;
	}
	VM_OBJECT_UNLOCK(object);

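	/*
	 * Borrow a pager buffer, map the pages into its kernel virtual
	 * address range, and read the whole run from the server with a
	 * single smb_read() into that mapping.
	 */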
	smb_makescred(&scred, td, cred);

	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, &scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	VM_OBJECT_LOCK(object);
	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n", error);
		vm_page_lock_queues();
		for (i = 0; i < npages; i++) {
			if (reqpage != i)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

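	/*
	 * Mark each page valid according to how much data was actually
	 * read: fully covered pages become entirely valid, the page
	 * straddling the end of the transfer becomes partially valid, and
	 * pages beyond it stay invalid.  Pages other than the requested one
	 * are freed on error, otherwise (de)activated and woken up.
	 */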
	vm_page_lock_queues();
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}

		if (i != reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results suggest that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->oflags & VPO_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_free(m);
			}
		}
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(object);
	return 0;
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 */
int
smbfs_putpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_ERROR;
	}

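	/*
	 * Map the pages into a borrowed pager buffer and push them to the
	 * server with one synchronous smb_write().  rtvals[] was preset to
	 * VM_PAGER_ERROR above and is updated only if the write succeeds.
	 */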
	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%d,resid=%d\n",(int)uio.uio_offset, uio.uio_resid);

	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, &scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (!error)
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
	return rtvals[0];
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;

	if (vp->v_iflag & VI_DOOMED)
		return 0;

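	/*
	 * Only one flusher at a time: if another thread already has
	 * NFLUSHINPROG set, note that we are waiting and sleep until it
	 * finishes, bailing out if the wait is interrupted by a signal.
	 */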
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", 2 * hz);
		error = smb_td_intr(td);
		if (error == EINTR)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
	}

	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}