sys/fs/nwfs/nwfs_io.c
/*-
 * Copyright (c) 1999, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <netncp/ncp.h>
#include <netncp/ncp_conn.h>
#include <netncp/ncp_subr.h>
#include <netncp/ncp_ncp.h>

#include <fs/nwfs/nwfs.h>
#include <fs/nwfs/nwfs_node.h>
#include <fs/nwfs/nwfs_subr.h>
static int nwfs_fastlookup = 1;

SYSCTL_DECL(_vfs_nwfs);
SYSCTL_INT(_vfs_nwfs, OID_AUTO, fastlookup, CTLFLAG_RW, &nwfs_fastlookup, 0, "");
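/*
 * The knob above is exported as vfs.nwfs.fastlookup; setting it to zero
 * with sysctl(8) disables the name-cache priming done in nwfs_readvdir().
 */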

extern int nwfs_pbuf_freecnt;

#define DE_SIZE	(sizeof(struct dirent))
#define NWFS_RWCACHE
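/*
 * DE_SIZE is the fixed size of each struct dirent synthesized by
 * nwfs_readvdir(); directory offsets are kept as multiples of it.
 * NWFS_RWCACHE selects the VM-backed paths in nwfs_getpages() and
 * nwfs_putpages() below instead of the vop_std* fallbacks.
 */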
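/*
 * Read directory entries: "." and ".." are synthesized locally, the rest
 * are fetched with NCP search requests, one struct dirent per DE_SIZE
 * chunk of the uio offset.
 */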
static int
nwfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct nwmount *nmp = VTONWFS(vp);
	int error, i;
	struct dirent dp;
	struct nwnode *np = VTONW(vp);
	struct nw_entry_info fattr;
	struct vnode *newvp;
	struct componentname cn;
	ncpfid fid;

	NCPVNDEBUG("dirname='%s'\n", np->n_name);
	if (uio->uio_resid < DE_SIZE || (uio->uio_offset < 0))
		return (EINVAL);
	error = 0;
	i = uio->uio_offset / DE_SIZE;	/* offset in directory */
	if (i == 0) {
		error = ncp_initsearch(vp, uio->uio_td, cred);
		if (error) {
			NCPVNDEBUG("cannot initialize search, error=%d", error);
			return (error);
		}
	}

	for (; uio->uio_resid >= DE_SIZE; i++) {
		bzero((char *)&dp, DE_SIZE);
		dp.d_reclen = DE_SIZE;
		switch (i) {
		case 0:		/* `.' */
		case 1:		/* `..' */
			dp.d_fileno = (i == 0) ? np->n_fid.f_id : np->n_parent.f_id;
			if (!dp.d_fileno)
				dp.d_fileno = NWFS_ROOT_INO;
			dp.d_namlen = i + 1;
			dp.d_name[0] = '.';
			dp.d_name[1] = '.';
			dp.d_name[i + 1] = '\0';
			dp.d_type = DT_DIR;
			break;
		default:
			error = ncp_search_for_file_or_subdir(nmp, &np->n_seq,
			    &fattr, uio->uio_td, cred);
			if (error && error < 0x80)
				break;
			dp.d_fileno = fattr.dirEntNum;
			dp.d_type = (fattr.attributes & aDIR) ? DT_DIR : DT_REG;
			dp.d_namlen = fattr.nameLen;
			bcopy(fattr.entryName, dp.d_name, dp.d_namlen);
			dp.d_name[dp.d_namlen] = '\0';
#if 0
			if (error && eofflag) {
			/*	*eofflag = 1;*/
				break;
			}
#endif
			break;
		}
		if (nwfs_fastlookup && !error && i > 1) {
			fid.f_id = fattr.dirEntNum;
			fid.f_parent = np->n_fid.f_id;
			error = nwfs_nget(vp->v_mount, fid, &fattr, vp, &newvp);
			if (!error) {
				VTONW(newvp)->n_ctime =
				    VTONW(newvp)->n_vattr.va_ctime.tv_sec;
				cn.cn_nameptr = dp.d_name;
				cn.cn_namelen = dp.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			} else
				error = 0;
		}
		/* NCP completion codes >= 0x80 are treated as end of search. */
		if (error >= 0x80) {
			error = 0;
			break;
		}
		if ((error = uiomove(&dp, DE_SIZE, uio)))
			break;
	}

	uio->uio_offset = i * DE_SIZE;
	return (error);
}

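/*
 * Read from a regular file or directory.  For regular files the cached
 * modification time is compared against the server's attributes, and any
 * locally cached buffers are invalidated on a mismatch before the NCP
 * read is issued.
 */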
int
nwfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct nwmount *nmp = VFSTONWFS(vp->v_mount);
	struct nwnode *np = VTONW(vp);
	struct thread *td;
	struct vattr vattr;
	int error;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		printf("%s: vn types other than VREG or VDIR are unsupported!\n",
		    __func__);
		return (EIO);
	}
	if (uiop->uio_resid == 0)
		return (0);
	if (uiop->uio_offset < 0)
		return (EINVAL);
/*	if (uiop->uio_offset + uiop->uio_resid > nmp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		error = nwfs_readvdir(vp, uiop, cred);
		return (error);
	}
	if (np->n_flag & NMODIFIED) {
		nwfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		np->n_mtime = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		if (np->n_mtime != vattr.va_mtime.tv_sec) {
			error = nwfs_vinvalbuf(vp, td);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
		}
	}
	error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, uiop, cred);
	return (error);
}

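/*
 * Write to a regular file.  For IO_APPEND the uio is positioned at the
 * locally tracked file size, since NetWare does not report a stable size
 * until the file is closed.
 */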
int
nwfs_writevnode(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
	struct nwmount *nmp = VTONWFS(vp);
	struct nwnode *np = VTONW(vp);
	struct thread *td;
/*	struct vattr vattr;*/
	int error = 0;

	if (vp->v_type != VREG) {
		printf("%s: vn types other than VREG unsupported!\n", __func__);
		return (EIO);
	}
	NCPVNDEBUG("ofs=%d,resid=%d\n", (int)uiop->uio_offset, uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return (EINVAL);
/*	if (uiop->uio_offset + uiop->uio_resid > nmp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			nwfs_attr_cacheremove(vp);
			error = nwfs_vinvalbuf(vp, td);
			if (error)
				return (error);
		}
		if (ioflag & IO_APPEND) {
			/*
			 * We can rely only on local information about the
			 * file size, because until the file is closed
			 * NetWare will not return the correct size.
			 */
#ifdef notyet
			nwfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error)
				return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return (0);

	if (vn_rlimit_fsize(vp, uiop, td))
		return (EFBIG);

	error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, uiop, cred);
	NCPVNDEBUG("after: ofs=%d,resid=%d\n", (int)uiop->uio_offset,
	    uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_vattr.va_size = np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return (error);
}

/*
 * Do an I/O operation to/from a cache block.
 */
int
nwfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct uio *uiop;
	struct nwnode *np;
	struct nwmount *nmp;
	int error = 0;
	struct uio uio;
	struct iovec io;

	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;
	if (bp->b_iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;
		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, uiop, cr);
			if (error)
				break;
			if (uiop->uio_resid) {
				/* Short read: zero-fill the rest of the buffer. */
				int left = uiop->uio_resid;
				int nread = bp->b_bcount - left;
				if (left > 0)
					bzero((char *)bp->b_data + nread, left);
			}
			break;
/*		case VDIR:
			nfsstats.readdir_bios++;
			uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
			if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
				error = nfs_readdirplusrpc(vp, uiop, cr);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = nfs_readdirrpc(vp, uiop, cr);
			if (error == 0 && uiop->uio_resid == bp->b_bcount)
				bp->b_flags |= B_INVAL;
			break;
*/
		default:
			printf("nwfs_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = error;
		}
	} else { /* write */
		if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
			bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid =
			    bp->b_dirtyend - bp->b_dirtyoff;
			uiop->uio_offset =
			    ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, uiop, cr);

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set BIO_ERROR and report the interruption
			 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  (The rest of this logic was inherited from
			 * the NFS client, where a V3 write rpc not committed
			 * to stable storage leaves the block dirty, marked
			 * B_DELWRI and B_NEEDCOMMIT, until a commit rpc or a
			 * FILESYNC write rpc is sent; only the EINTR and
			 * B_NEEDCOMMIT handling below applies here.)
			 */
			if (error == EINTR
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				int s;

				s = splbio();
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				splx(s);
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_error = error;	/* XXX np->n_error? */
/*					np->n_flag |= NWRITEERR;*/
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			return (0);
		}
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	return (error);
}

/*
 * Vnode op for VM getpages.
 * XXX: it would be nice to get rid of the multiple I/O routines here.
 */
int
nwfs_getpages(struct vop_getpages_args /* {
	struct vnode *a_vp;
	vm_page_t *a_m;
	int a_count;
	int a_reqpage;
	vm_ooffset_t a_offset;
} */ *ap)
{
#ifndef NWFS_RWCACHE
	return (vop_stdgetpages(ap));
#else
	int i, error, nextoff, size, toff, npages, count;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nwmount *nmp;
	struct nwnode *np;
	vm_object_t object;
	vm_page_t *pages;

	vp = ap->a_vp;
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		printf("nwfs_getpages: called with non-merged cache vnode??\n");
		return (VM_PAGER_ERROR);
	}

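	/*
	 * Borrow a pager buffer for its kernel virtual address range and
	 * map the target pages into it, so ncp_read() below can fill them
	 * through a single contiguous system-space buffer.
	 */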
	bp = getpbuf(&nwfs_pbuf_freecnt);
	npages = btoc(count);
	kva = (vm_offset_t)bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t)kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &nwfs_pbuf_freecnt);

	VM_OBJECT_LOCK(object);
	if (error && (uio.uio_resid == count)) {
		printf("nwfs_getpages: error %d\n", error);
		vm_page_lock_queues();
		for (i = 0; i < npages; i++) {
			if (ap->a_reqpage != i)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return (VM_PAGER_ERROR);
	}

	size = count - uio.uio_resid;

	vm_page_lock_queues();
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("nwfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Partially filled last page: mark only the bytes
			 * actually read as valid, rounded up to DEV_BSIZE.
			 */
			int nvalid = ((size + DEV_BSIZE - 1) - toff) &
			    ~(DEV_BSIZE - 1);
			vm_page_set_valid(m, 0, nvalid);
			KASSERT((m->dirty & vm_page_bits(0, nvalid)) == 0,
			    ("nwfs_getpages: page %p is dirty", m));
		}

		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  It appears
			 * that empirical results show deactivating pages is
			 * best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->oflags & VPO_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_free(m);
			}
		}
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(object);
	return (0);
#endif /* NWFS_RWCACHE */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done synchronously.
 * Note that vop_close always invalidates pages before the close, so it is
 * not necessary to open the vnode here.
 */
int
nwfs_putpages(struct vop_putpages_args /* {
	struct vnode *a_vp;
	vm_page_t *a_m;
	int a_count;
	int a_sync;
	int *a_rtvals;
	vm_ooffset_t a_offset;
} */ *ap)
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifndef NWFS_RWCACHE
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return (error);
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct nwmount *nmp;
	struct nwnode *np;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

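	/*
	 * Assume failure for every page up front; on a successful write
	 * vnode_pager_undirty_pages() marks the pages actually written
	 * as VM_PAGER_OK.
	 */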
	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_ERROR;
	}

	bp = getpbuf(&nwfs_pbuf_freecnt);
	kva = (vm_offset_t)bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t)kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	NCPVNDEBUG("ofs=%d,resid=%d\n", (int)uio.uio_offset, uio.uio_resid);

	error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, &uio, cred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	NCPVNDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);
	relpbuf(bp, &nwfs_pbuf_freecnt);

	if (!error)
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
	return (rtvals[0]);
#endif /* NWFS_RWCACHE */
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
nwfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct nwnode *np = VTONW(vp);
/*	struct nwmount *nmp = VTONWFS(vp);*/
	int error = 0;

	if (vp->v_iflag & VI_DOOMED)
		return (0);

	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		/*
		 * The tsleep() result is discarded; ncp_chkintr() detects
		 * a pending signal on each iteration of the poll loop.
		 */
		error = tsleep(&np->n_flag, PRIBIO + 2, "nwfsvinv", 2 * hz);
		error = ncp_chkintr(NWFSTOCONN(VTONWFS(vp)), td);
		if (error == EINTR)
			return (EINTR);
	}
	np->n_flag |= NFLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
	}

	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}