/*-
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>	/* defines plimit structure in proc struct */
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern int smbfs_pbuf_freecnt;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");

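/*
 * Example (userland): when fastlookup is enabled, entries returned by
 * readdir are also entered into the name cache.  To disable this at
 * run time:
 *
 *	sysctl vfs.smbfs.fastlookup=0
 */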

#define DE_SIZE	(sizeof(struct dirent))

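/*
 * Fill in directory entries for a readdir request.  Offsets within the
 * directory are measured in units of DE_SIZE; slots 0 and 1 are the
 * synthetic "." and ".." entries, the rest come from the server-side
 * search context cached in the smbnode.
 */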
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error/*, *eofflag = ap->a_eofflag*/;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	smb_makescred(&scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0)
		return EINVAL;
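	/*
	 * Synthesize "." and "..", which the server does not return.
	 * ".." uses the parent's inode number when it is known; a fake
	 * but distinctive number is substituted when it is not.
	 */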
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? VTOSMB(np->n_parent)->n_ino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_name[offset + 1] = '\0';
		de.d_type = DT_DIR;
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			return error;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0)
		return 0;
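	/*
	 * The SMB search context cannot seek.  If the requested offset does
	 * not match our current position (or no search is open), reopen the
	 * search from the start of the directory; the loop below then skips
	 * forward one entry at a time until the positions agree.
	 */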
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    &scred, &ctx);
		if (error) {
			SMBVDEBUG("can not open search, error = %d", error);
			return error;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, &scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
			return error == ENOENT ? 0 : error;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, &scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		de.d_name[de.d_namlen] = '\0';
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
	return error;
}

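/*
 * Read from a vnode, bypassing the buffer cache.  Regular files are read
 * with plain SMB read requests; directories are handed off to
 * smbfs_readvdir().
 */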
int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred scred;
	int error, lks;

	/*
	 * Protect against a method that is not supported for now.
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vnode types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/*lockstatus(vp->v_vnlock, td);*/
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY, td);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY, td);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
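	/*
	 * Cache coherence check: refetch the attributes and compare the
	 * server's modification time against the one last seen.  If some
	 * other client changed the file, locally cached buffers are
	 * flushed and invalidated.
	 */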
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	smb_makescred(&scred, td, cred);
	return smb_read(smp->sm_share, np->n_fid, uiop, &scred);
}

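/*
 * Write to a regular file, bypassing the buffer cache.  Implements
 * IO_APPEND and IO_SYNC semantics and grows the cached file size (and
 * the VM object) when the write extends the file.
 */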
int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred scred;
	struct proc *p;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vnode types other than VREG are unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%d,resid=%d\n", (int)uiop->uio_offset, uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
	p = td->td_proc;
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#if notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred, td);
			if (error)
				return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;
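	/*
	 * Enforce the per-process file size limit: POSIX requires SIGXFSZ
	 * to be delivered and EFBIG returned when a write would exceed
	 * RLIMIT_FSIZE.
	 */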
	if (p != NULL) {
		PROC_LOCK(p);
		if (uiop->uio_offset + uiop->uio_resid >
		    lim_cur(p, RLIMIT_FSIZE)) {
			psignal(p, SIGXFSZ);
			PROC_UNLOCK(p);
			return EFBIG;
		}
		PROC_UNLOCK(p);
	}
	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
	SMBVDEBUG("after: ofs=%d,resid=%d\n", (int)uiop->uio_offset, uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
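/*
 * The buffer is addressed by b_blkno in DEV_BSIZE units; the file offset
 * of the transfer is therefore b_blkno * DEV_BSIZE (plus b_dirtyoff for
 * writes).  The I/O itself is performed synchronously in both directions.
 */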
int
smbfs_doio(struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct vnode *vp = bp->b_vp;
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio uio, *uiop = &uio;
	struct iovec io;
	struct smb_cred scred;
	int error = 0;

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	smb_makescred(&scred, td, cr);

	if (bp->b_iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;
		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			error = smb_read(smp->sm_share, np->n_fid, uiop, &scred);
			if (error)
				break;
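			/*
			 * A short read (nonzero residual) normally means we
			 * hit end of file; zero the remainder of the buffer
			 * so no stale data is exposed.
			 */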
			if (uiop->uio_resid) {
				int left = uiop->uio_resid;
				int nread = bp->b_bcount - left;
				if (left > 0)
					bzero((char *)bp->b_data + nread, left);
			}
			break;
		default:
			printf("smbfs_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_error = error;
			bp->b_ioflags |= BIO_ERROR;
		}
	} else { /* write */
		if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
			bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			bp->b_flags |= B_WRITEINPROG;
			error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
			bp->b_flags &= ~B_WRITEINPROG;

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set BIO_ERROR and report the interruption
			 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused.  This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 */
			if (error == EINTR
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				int s;

				s = splbio();
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				splx(s);
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_error = error;
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			return 0;
		}
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	return error;
}

/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 */
int
smbfs_getpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count, reqpage;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_object_t object;
	vm_page_t *pages, m;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	npages = btoc(count);
	reqpage = ap->a_reqpage;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	m = pages[reqpage];

	VM_OBJECT_LOCK(object);
	if (m->valid != 0) {
		/* handled by vm_fault now */
		/* vm_page_zero_invalid(m, TRUE); */
		vm_page_lock_queues();
		for (i = 0; i < npages; ++i) {
			if (i != reqpage)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return 0;
	}
	VM_OBJECT_UNLOCK(object);

	smb_makescred(&scred, td, cred);

	bp = getpbuf(&smbfs_pbuf_freecnt);

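	/*
	 * Borrow a pager buffer for its kernel virtual address range and
	 * map the pages into it, so smb_read() can treat the whole request
	 * as ordinary kernel memory through a UIO_SYSSPACE uio.
	 */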
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodein++;
	cnt.v_vnodepgsin += npages;

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, &scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	VM_OBJECT_LOCK(object);
	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n", error);
		vm_page_lock_queues();
		for (i = 0; i < npages; i++) {
			if (reqpage != i)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

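	/*
	 * Mark each page valid (and clean) according to how many bytes the
	 * server actually returned; a partially filled page is validated
	 * only up to the number of bytes received.
	 */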
	vm_page_lock_queues();
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_validclean(m, 0, size - toff);
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(m, TRUE); */
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}

		if (i != reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Result:
			 * It appears that empirical results show that
			 * deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_free(m);
			}
		}
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(object);
	return 0;
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before the close, so it's
 * not necessary to open the vnode.
 */
int
smbfs_putpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, -1);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, -1);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

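	/*
	 * Assume nothing is written until proven otherwise; pages that are
	 * successfully pushed to the server are upgraded to VM_PAGER_OK
	 * after the write completes.
	 */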
	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += count;

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%d,resid=%d\n", (int)uio.uio_offset, uio.uio_resid);

	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, &scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		vm_page_lock_queues();
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
		vm_page_unlock_queues();
	}
	return rtvals[0];
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(vp, flags, cred, td, intrflg)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct thread *td;
	int intrflg;
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0, slpflag, slptimeo;

	if (vp->v_iflag & VI_XLOCK)
		return 0;

	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		(void)tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", slptimeo);
		error = smb_td_intr(td);
		if (error == EINTR && intrflg)
			return EINTR;
	}
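	/*
	 * No flush is in progress now; claim it so that concurrent callers
	 * block in the loop above until we are done.
	 */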
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, td, slpflag, 0);
	while (error) {
		if (intrflg && (error == ERESTART || error == EINTR)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, flags, cred, td, slpflag, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}