FreeBSD/Linux Kernel Cross Reference
sys/netsmb/smb_iod.c
/*	$NetBSD: smb_iod.c,v 1.29 2008/06/24 10:37:19 gmcgarry Exp $	*/

/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * FreeBSD: src/sys/netsmb/smb_iod.c,v 1.4 2001/12/09 17:48:08 arr Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: smb_iod.c,v 1.29 2008/06/24 10:37:19 gmcgarry Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>

#define SMB_IOD_EVLOCKPTR(iod)  (&((iod)->iod_evlock))
#define SMB_IOD_EVLOCK(iod)     smb_sl_lock(&((iod)->iod_evlock))
#define SMB_IOD_EVUNLOCK(iod)   smb_sl_unlock(&((iod)->iod_evlock))

#define SMB_IOD_RQLOCKPTR(iod)  (&((iod)->iod_rqlock))
#define SMB_IOD_RQLOCK(iod)     smb_sl_lock(&((iod)->iod_rqlock))
#define SMB_IOD_RQUNLOCK(iod)   smb_sl_unlock(&((iod)->iod_rqlock))

#define smb_iod_wakeup(iod)     wakeup(&(iod)->iod_flags)

static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");

static int smb_iod_next;

static void smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);

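/*
 * Mark a request as processed: record the error code, bump the response
 * generation, wake up any thread sleeping in smb_iod_waitrq() and stop
 * the request timeout callout.  An optional receive callback is invoked
 * with the request held locked.
 */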
static void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{
        SMBRQ_SLOCK(rqp);
        rqp->sr_lerror = error;
        rqp->sr_rpgen++;
        rqp->sr_state = SMBRQ_NOTIFIED;
        wakeup(&rqp->sr_state);
        callout_stop(&rqp->sr_timo_ch);
        if (rqp->sr_recvcallback)
                (*rqp->sr_recvcallback)(rqp->sr_recvarg);
        SMBRQ_SUNLOCK(rqp);
}

static void
smb_iod_rqtimedout(void *arg)
{
        smb_iod_rqprocessed((struct smb_rq *)arg, ETIMEDOUT);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
        struct smb_rq *rqp;

        /*
         * Invalidate all outstanding requests for this connection
         */
        SMB_IOD_RQLOCK(iod);
        SIMPLEQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
                if (rqp->sr_flags & SMBR_INTERNAL)
                        SMBRQ_SUNLOCK(rqp);
                rqp->sr_flags |= SMBR_RESTART;
                smb_iod_rqprocessed(rqp, ENOTCONN);
        }
        SMB_IOD_RQUNLOCK(iod);
}

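/*
 * Tear down the transport attached to the virtual circuit, if any.
 */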
static void
smb_iod_closetran(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;
        struct lwp *l = iod->iod_l;

        if (vcp->vc_tdata == NULL)
                return;
        SMB_TRAN_DISCONNECT(vcp, l);
        SMB_TRAN_DONE(vcp, l);
        vcp->vc_tdata = NULL;
}

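/*
 * Declare the connection dead: close the transport and fail all
 * outstanding requests with ENOTCONN.
 */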
static void
smb_iod_dead(struct smbiod *iod)
{
        iod->iod_state = SMBIOD_ST_DEAD;
        smb_iod_closetran(iod);
        smb_iod_invrq(iod);
}

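/*
 * (Re)establish the connection to the server: create, bind and connect
 * the transport, then run the SMB negotiate and session setup exchanges.
 * Any failure marks the connection dead.
 */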
static int
smb_iod_connect(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;
        struct lwp *l = iod->iod_l;
        int error;

        SMBIODEBUG(("%d\n", iod->iod_state));
        switch(iod->iod_state) {
        case SMBIOD_ST_VCACTIVE:
                SMBIODEBUG(("called for already opened connection\n"));
                return EISCONN;
        case SMBIOD_ST_DEAD:
                return ENOTCONN;        /* XXX: last error code ? */
        default:
                break;
        }
        vcp->vc_genid++;

#define ithrow(cmd) \
        if ((error = cmd)) \
                goto fail

        ithrow(SMB_TRAN_CREATE(vcp, l));
        SMBIODEBUG(("tcreate\n"));
        if (vcp->vc_laddr) {
                ithrow(SMB_TRAN_BIND(vcp, vcp->vc_laddr, l));
        }
        SMBIODEBUG(("tbind\n"));
        ithrow(SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, l));
        SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
        iod->iod_state = SMBIOD_ST_TRANACTIVE;
        SMBIODEBUG(("tconnect\n"));
        /* vcp->vc_mid = 0;*/
        ithrow(smb_smb_negotiate(vcp, &iod->iod_scred));
        SMBIODEBUG(("snegotiate\n"));
        ithrow(smb_smb_ssnsetup(vcp, &iod->iod_scred));
        iod->iod_state = SMBIOD_ST_VCACTIVE;

#undef ithrow

        SMBIODEBUG(("completed\n"));
        smb_iod_invrq(iod);

        return (0);

fail:
        smb_iod_dead(iod);
        return (error);
}

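/*
 * Close the SMB session (if active) and the underlying transport,
 * returning the connection to the "not connected" state.
 */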
static int
smb_iod_disconnect(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;

        SMBIODEBUG(("\n"));
        if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
                smb_smb_ssnclose(vcp, &iod->iod_scred);
                iod->iod_state = SMBIOD_ST_TRANACTIVE;
        }
        vcp->vc_smbuid = SMB_UID_UNKNOWN;
        smb_iod_closetran(iod);
        iod->iod_state = SMBIOD_ST_NOTCONN;
        return 0;
}

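/*
 * Reconnect a share: bring the virtual circuit back up if needed and
 * redo the tree connect for the given share.
 */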
static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
        int error;

        if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
                if (iod->iod_state != SMBIOD_ST_DEAD)
                        return ENOTCONN;
                iod->iod_state = SMBIOD_ST_RECONNECT;
                error = smb_iod_connect(iod);
                if (error)
                        return error;
        }
        SMBIODEBUG(("tree reconnect\n"));
        SMBS_ST_LOCK(ssp);
        ssp->ss_flags |= SMBS_RECONNECTING;
        SMBS_ST_UNLOCK(ssp);
        error = smb_smb_treeconnect(ssp, &iod->iod_scred);
        SMBS_ST_LOCK(ssp);
        ssp->ss_flags &= ~SMBS_RECONNECTING;
        SMBS_ST_UNLOCK(ssp);
        wakeup(&ssp->ss_vcgenid);
        return error;
}

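/*
 * Try to transmit a single request.  On the first attempt the TID and
 * UID fields are filled in and the header is fixed up; after more than
 * five failed attempts the request is aborted.  Returns ENOTCONN when
 * the transport is beyond recovery, 0 otherwise.
 */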
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
        struct lwp *l = iod->iod_l;
        struct smb_vc *vcp = iod->iod_vc;
        struct smb_share *ssp = rqp->sr_share;
        struct mbuf *m;
        int error;

        SMBIODEBUG(("iod_state = %d, rqmid %d\n", iod->iod_state, rqp->sr_mid));
        switch (iod->iod_state) {
        case SMBIOD_ST_NOTCONN:
                smb_iod_rqprocessed(rqp, ENOTCONN);
                return 0;
        case SMBIOD_ST_DEAD:
                iod->iod_state = SMBIOD_ST_RECONNECT;
                return 0;
        case SMBIOD_ST_RECONNECT:
                return 0;
        default:
                break;
        }
        if (rqp->sr_sendcnt == 0) {
                u_int16_t tid = ssp ? ssp->ss_tid : SMB_TID_UNKNOWN;
                u_int16_t rquid = vcp ? vcp->vc_smbuid : 0;
#ifdef movedtoanotherplace
                if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
                        return 0;
#endif
                SMBRQ_PUTLE16(rqp->sr_rqtid, tid);
                SMBRQ_PUTLE16(rqp->sr_rquid, rquid);
                mb_fixhdr(&rqp->sr_rq);
        }
        if (rqp->sr_sendcnt++ > 5) {
                rqp->sr_flags |= SMBR_RESTART;
                smb_iod_rqprocessed(rqp, rqp->sr_lerror);
                /*
                 * If all attempts to send a request failed, then
                 * something is seriously hosed.
                 */
                return ENOTCONN;
        }
        SMBSDEBUG(("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0));
        m_dumpm(rqp->sr_rq.mb_top);
        m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_WAIT);
        error = rqp->sr_lerror = (m) ? SMB_TRAN_SEND(vcp, m, l) : ENOBUFS;
        if (error == 0) {
                if (rqp->sr_timo > 0) {
                        callout_init(&rqp->sr_timo_ch, 0);
                        callout_reset(&rqp->sr_timo_ch, rqp->sr_timo,
                            smb_iod_rqtimedout, rqp);
                }

                if (rqp->sr_flags & SMBR_NOWAIT) {
                        /* caller doesn't want to wait, flag as processed */
                        smb_iod_rqprocessed(rqp, 0);
                        return (0);
                }

#if 0
                iod->iod_lastrqsent = ts;
#endif
                rqp->sr_flags |= SMBR_SENT;
                rqp->sr_state = SMBRQ_SENT;
                return 0;
        }
        /*
         * Check for fatal errors
         */
        if (vcp && SMB_TRAN_FATAL(vcp, error)) {
                /*
                 * No further attempts should be made
                 */
                return ENOTCONN;
        }
        if (smb_rq_intr(rqp))
                smb_iod_rqprocessed(rqp, EINTR);
        return 0;
}

/*
 * Process incoming packets
 */
static void
smb_iod_recvall(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;
        struct lwp *l = iod->iod_l;
        struct smb_rq *rqp;
        struct mbuf *m;
        u_char *hp;
        u_short mid;
        int error;

        switch (iod->iod_state) {
        case SMBIOD_ST_NOTCONN:
        case SMBIOD_ST_DEAD:
        case SMBIOD_ST_RECONNECT:
                return;
        default:
                break;
        }

        for (;;) {
                m = NULL;
                error = SMB_TRAN_RECV(vcp, &m, l);
                if (error == EWOULDBLOCK)
                        break;
                if (SMB_TRAN_FATAL(vcp, error)) {
                        smb_iod_dead(iod);
                        break;
                }
                if (error)
                        break;
                KASSERT(m != NULL);

                m = m_pullup(m, SMB_HDRLEN);
                if (m == NULL)
                        continue;       /* wait for a good packet */
                /*
                 * We now have an entire, but possibly invalid, SMB packet.
                 * Be careful while parsing it.
                 */
                m_dumpm(m);
                hp = mtod(m, u_char*);
                if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
                        m_freem(m);
                        continue;
                }
                mid = SMB_HDRMID(hp);
                SMBSDEBUG(("mid %04x\n", (u_int)mid));
                SMB_IOD_RQLOCK(iod);
                SIMPLEQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
                        if (rqp->sr_mid != mid)
                                continue;
                        SMBRQ_SLOCK(rqp);
                        if (rqp->sr_rp.md_top == NULL) {
                                md_initm(&rqp->sr_rp, m);
                        } else {
                                if (rqp->sr_flags & SMBR_MULTIPACKET) {
                                        md_append_record(&rqp->sr_rp, m);
                                } else {
                                        SMBRQ_SUNLOCK(rqp);
                                        SMBIODEBUG(("duplicate response %d (ignored)\n", mid));
                                        break;
                                }
                        }
                        SMBRQ_SUNLOCK(rqp);
                        smb_iod_rqprocessed(rqp, 0);
                        break;
                }
                SMB_IOD_RQUNLOCK(iod);
                if (rqp == NULL) {
                        SMBIODEBUG(("drop resp with mid %d\n", (u_int)mid));
                        /* smb_printrqlist(vcp);*/
                        m_freem(m);
                }
        }
        /*
         * check for interrupts
         */
        SMB_IOD_RQLOCK(iod);
        SIMPLEQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
                if (smb_proc_intr(rqp->sr_cred->scr_l)) {
                        smb_iod_rqprocessed(rqp, EINTR);
                }
        }
        SMB_IOD_RQUNLOCK(iod);
}

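/*
 * Post an event to the iod thread.  For synchronous events
 * (SMBIOD_EV_SYNC) the caller sleeps until the event has been
 * processed and its error code is returned.
 */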
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
        struct smbiod_event *evp;
        int error;

        SMBIODEBUG(("\n"));
        evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
        evp->ev_type = event;
        evp->ev_ident = ident;
        SMB_IOD_EVLOCK(iod);
        SIMPLEQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
        if ((event & SMBIOD_EV_SYNC) == 0) {
                SMB_IOD_EVUNLOCK(iod);
                smb_iod_wakeup(iod);
                return 0;
        }
        smb_iod_wakeup(iod);
        mtsleep(evp, PWAIT | PNORELOCK, "smbevw", 0, SMB_IOD_EVLOCKPTR(iod));
        error = evp->ev_error;
        free(evp, M_SMBIOD);
        return error;
}

/*
 * Place a request in the queue.  Requests from smbiod itself have
 * a higher priority.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
        struct smb_vc *vcp = rqp->sr_vc;
        struct smbiod *iod = vcp->vc_iod;
        int error;

        SMBIODEBUG(("\n"));
        if (rqp->sr_cred->scr_l == iod->iod_l) {
                rqp->sr_flags |= SMBR_INTERNAL;
                SMB_IOD_RQLOCK(iod);
                SIMPLEQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
                SMB_IOD_RQUNLOCK(iod);
                for (;;) {
                        if (smb_iod_sendrq(iod, rqp) != 0) {
                                smb_iod_dead(iod);
                                break;
                        }
                        /*
                         * we don't need to lock the state field here
                         */
                        if (rqp->sr_state != SMBRQ_NOTSENT)
                                break;
                        tsleep(&iod->iod_flags, PWAIT, "smbsndw", hz);
                }
                if (rqp->sr_lerror)
                        smb_iod_removerq(rqp);
                return rqp->sr_lerror;
        }

        switch (iod->iod_state) {
        case SMBIOD_ST_NOTCONN:
                return ENOTCONN;
        case SMBIOD_ST_DEAD:
                error = smb_iod_request(iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
                if (error)
                        return error;
                /*
                 * Return an error to force the caller to reissue the
                 * request using the new connection state.
                 */
                return EXDEV;
        default:
                break;
        }

        SMB_IOD_RQLOCK(iod);
        for (;;) {
#ifdef DIAGNOSTIC
                if (vcp->vc_maxmux == 0)
                        panic("%s: vc maxmux == 0", __func__);
#endif
                if (iod->iod_muxcnt < vcp->vc_maxmux)
                        break;
                iod->iod_muxwant++;
                /* XXX use interruptible sleep? */
                mtsleep(&iod->iod_muxwant, PWAIT, "smbmux",
                    0, SMB_IOD_RQLOCKPTR(iod));
        }
        iod->iod_muxcnt++;
        SIMPLEQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
        SMB_IOD_RQUNLOCK(iod);
        smb_iod_wakeup(iod);
        return 0;
}

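/*
 * Remove a request from the queue.  Requests that are not internal to
 * the iod also release their multiplex slot and wake up any thread
 * waiting for one in smb_iod_addrq().
 */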
int
smb_iod_removerq(struct smb_rq *rqp)
{
        struct smb_vc *vcp = rqp->sr_vc;
        struct smbiod *iod = vcp->vc_iod;

        SMBIODEBUG(("\n"));
        if (rqp->sr_flags & SMBR_INTERNAL) {
                SMB_IOD_RQLOCK(iod);
                SIMPLEQ_REMOVE(&iod->iod_rqlist, rqp, smb_rq, sr_link);
                SMB_IOD_RQUNLOCK(iod);
                return 0;
        }
        SMB_IOD_RQLOCK(iod);
        while (rqp->sr_flags & SMBR_XLOCK) {
                rqp->sr_flags |= SMBR_XLOCKWANT;
                mtsleep(rqp, PWAIT, "smbxrm", 0, SMB_IOD_RQLOCKPTR(iod));
        }
        SIMPLEQ_REMOVE(&iod->iod_rqlist, rqp, smb_rq, sr_link);
        iod->iod_muxcnt--;
        if (iod->iod_muxwant) {
                iod->iod_muxwant--;
                wakeup(&iod->iod_muxwant);
        }
        SMB_IOD_RQUNLOCK(iod);
        return 0;
}

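/*
 * Wait for a response to a queued request.  Internal requests are
 * driven directly by calling the send/receive routines; ordinary
 * requests sleep until smb_iod_rqprocessed() notifies them.
 */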
int
smb_iod_waitrq(struct smb_rq *rqp)
{
        struct smbiod *iod = rqp->sr_vc->vc_iod;
        int error;

        SMBIODEBUG(("\n"));
        if (rqp->sr_flags & SMBR_INTERNAL) {
                for (;;) {
                        smb_iod_sendall(iod);
                        smb_iod_recvall(iod);
                        if (rqp->sr_rpgen != rqp->sr_rplast)
                                break;
                        tsleep(&iod->iod_flags, PWAIT, "smbirq", hz);
                }
                smb_iod_removerq(rqp);
                return rqp->sr_lerror;
        }
        SMBRQ_SLOCK(rqp);
        if (rqp->sr_rpgen == rqp->sr_rplast) {
                /* XXX interruptible sleep? */
                mtsleep(&rqp->sr_state, PWAIT, "smbwrq", 0,
                    SMBRQ_SLOCKPTR(rqp));
        }
        rqp->sr_rplast++;
        SMBRQ_SUNLOCK(rqp);
        error = rqp->sr_lerror;
        if (rqp->sr_flags & SMBR_MULTIPACKET) {
                /*
                 * If the request should stay in the list, reinsert it
                 * at the end of the queue so other waiters get a chance too.
                 */
                SMB_IOD_RQLOCK(iod);
                SIMPLEQ_REMOVE(&iod->iod_rqlist, rqp, smb_rq, sr_link);
                SIMPLEQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
                SMB_IOD_RQUNLOCK(iod);
        } else
                smb_iod_removerq(rqp);
        return error;
}

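/*
 * Walk the request list and (re)send everything that is still in the
 * SMBRQ_NOTSENT state; a fatal send error marks the connection dead.
 */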
static void
smb_iod_sendall(struct smbiod *iod)
{
        struct smb_rq *rqp;
        int herror;

        herror = 0;
        /*
         * Loop through the list of requests and send them if possible
         */
        SMB_IOD_RQLOCK(iod);
        SIMPLEQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
                if (__predict_false(rqp->sr_state == SMBRQ_NOTSENT)) {
                        rqp->sr_flags |= SMBR_XLOCK;
                        SMB_IOD_RQUNLOCK(iod);
                        herror = smb_iod_sendrq(iod, rqp);
                        SMB_IOD_RQLOCK(iod);
                        rqp->sr_flags &= ~SMBR_XLOCK;
                        if (rqp->sr_flags & SMBR_XLOCKWANT) {
                                rqp->sr_flags &= ~SMBR_XLOCKWANT;
                                wakeup(rqp);
                        }

                        if (__predict_false(herror != 0))
                                break;
                }
        }
        SMB_IOD_RQUNLOCK(iod);
        if (herror == ENOTCONN)
                smb_iod_dead(iod);
}

/*
 * "main" function for smbiod daemon
 */
static inline void
smb_iod_main(struct smbiod *iod)
{
#if 0
        struct smb_vc *vcp = iod->iod_vc;
        struct timespec tsnow;
#endif
        struct smbiod_event *evp;

        SMBIODEBUG(("\n"));

        /*
         * Check all interesting events
         */
        for (;;) {
                SMB_IOD_EVLOCK(iod);
                evp = SIMPLEQ_FIRST(&iod->iod_evlist);
                if (evp == NULL) {
                        SMB_IOD_EVUNLOCK(iod);
                        break;
                }
                SIMPLEQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
                evp->ev_type |= SMBIOD_EV_PROCESSING;
                SMB_IOD_EVUNLOCK(iod);
                switch (evp->ev_type & SMBIOD_EV_MASK) {
                case SMBIOD_EV_CONNECT:
                        iod->iod_state = SMBIOD_ST_RECONNECT;
                        evp->ev_error = smb_iod_connect(iod);
                        break;
                case SMBIOD_EV_DISCONNECT:
                        evp->ev_error = smb_iod_disconnect(iod);
                        break;
                case SMBIOD_EV_TREECONNECT:
                        evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
                        break;
                case SMBIOD_EV_SHUTDOWN:
                        iod->iod_flags |= SMBIOD_SHUTDOWN;
                        break;
                case SMBIOD_EV_NEWRQ:
                        break;
                }
                if (evp->ev_type & SMBIOD_EV_SYNC) {
                        SMB_IOD_EVLOCK(iod);
                        wakeup(evp);
                        SMB_IOD_EVUNLOCK(iod);
                } else
                        free(evp, M_SMBIOD);
        }
#if 0
        if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
                getnanotime(&tsnow);
                timespecsub(&tsnow, &iod->iod_pingtimo);
                if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
                        smb_smb_echo(vcp, &iod->iod_scred);
                }
        }
#endif
        smb_iod_sendall(iod);
        smb_iod_recvall(iod);
}

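/*
 * Body of the per-connection iod kthread: process events and move
 * requests until asked to shut down.
 */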
void
smb_iod_thread(void *arg)
{
        struct smbiod *iod = arg;
        int s;

        /*
         * Here we assume that the thread structure stays the same for
         * the entire life of the kthread (kproc, to be more precise).
         */
        KASSERT(iod->iod_l == curlwp);
        smb_makescred(&iod->iod_scred, iod->iod_l, NULL);
        s = splnet();
        while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
                smb_iod_main(iod);
                if (iod->iod_flags & SMBIOD_SHUTDOWN)
                        break;
                SMBIODEBUG(("going to sleep\n"));
                tsleep(&iod->iod_flags, PSOCK, "smbidle", 0);
        }
        splx(s);
        kthread_exit(0);
}

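/*
 * Allocate and initialize the iod for a virtual circuit and start its
 * kernel thread.
 */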
int
smb_iod_create(struct smb_vc *vcp)
{
        struct smbiod *iod;
        int error;

        iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
        iod->iod_id = smb_iod_next++;
        iod->iod_state = SMBIOD_ST_NOTCONN;
        iod->iod_vc = vcp;
#if 0
        iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
        microtime(&iod->iod_lastrqsent);
#endif
        vcp->vc_iod = iod;
        smb_sl_init(&iod->iod_rqlock, "smbrql");
        SIMPLEQ_INIT(&iod->iod_rqlist);
        smb_sl_init(&iod->iod_evlock, "smbevl");
        SIMPLEQ_INIT(&iod->iod_evlist);
#ifdef __NetBSD__
        error = kthread_create(PRI_NONE, 0, NULL, smb_iod_thread, iod,
            &iod->iod_l, "smbiod%d", iod->iod_id);
#else
        error = kthread_create(smb_iod_thread, iod, &iod->iod_p,
            RFNOWAIT, "smbiod%d", iod->iod_id);
#endif
        if (error) {
                SMBIODEBUG(("can't start smbiod: %d", error));
                free(iod, M_SMBIOD);
                return error;
        }
        return 0;
}

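/*
 * Shut down the iod thread (synchronously) and release its resources.
 */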
int
smb_iod_destroy(struct smbiod *iod)
{
        smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
        smb_sl_destroy(&iod->iod_rqlock);
        smb_sl_destroy(&iod->iod_evlock);
        free(iod, M_SMBIOD);
        return 0;
}

int
smb_iod_init(void)
{
        return 0;
}

#ifndef __NetBSD__
int
smb_iod_done(void)
{
        return 0;
}
#endif