FreeBSD/Linux Kernel Cross Reference
sys/netsmb/smb_iod.c
/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/5.1/sys/netsmb/smb_iod.c 112888 2003-03-31 22:49:17Z jeff $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>


#define SMBIOD_SLEEP_TIMO       2
#define SMBIOD_PING_TIMO        60      /* seconds */

#define SMB_IOD_EVLOCKPTR(iod)  (&((iod)->iod_evlock))
#define SMB_IOD_EVLOCK(iod)     smb_sl_lock(&((iod)->iod_evlock))
#define SMB_IOD_EVUNLOCK(iod)   smb_sl_unlock(&((iod)->iod_evlock))

#define SMB_IOD_RQLOCKPTR(iod)  (&((iod)->iod_rqlock))
#define SMB_IOD_RQLOCK(iod)     smb_sl_lock(&((iod)->iod_rqlock))
#define SMB_IOD_RQUNLOCK(iod)   smb_sl_unlock(&((iod)->iod_rqlock))

#define smb_iod_wakeup(iod)     wakeup(&(iod)->iod_flags)


static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");

static int smb_iod_next;

static int  smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);

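/*
 * Mark a request as processed: record the error code, bump the response
 * generation and wake up the thread sleeping in smb_iod_waitrq().
 */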
static __inline void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{
        SMBRQ_SLOCK(rqp);
        rqp->sr_lerror = error;
        rqp->sr_rpgen++;
        rqp->sr_state = SMBRQ_NOTIFIED;
        wakeup(&rqp->sr_state);
        SMBRQ_SUNLOCK(rqp);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
        struct smb_rq *rqp;

        /*
         * Invalidate all outstanding requests for this connection
         */
        SMB_IOD_RQLOCK(iod);
        TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
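                /*
                 * XXX: this unlock looks questionable: no matching
                 * SMBRQ_SLOCK() is held at this point in the loop.
                 */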
                if (rqp->sr_flags & SMBR_INTERNAL)
                        SMBRQ_SUNLOCK(rqp);
                rqp->sr_flags |= SMBR_RESTART;
                smb_iod_rqprocessed(rqp, ENOTCONN);
        }
        SMB_IOD_RQUNLOCK(iod);
}

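/*
 * Tear down the transport connection, if any, and detach its data
 * from the virtual circuit.
 */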
static void
smb_iod_closetran(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;
        struct thread *td = iod->iod_td;

        if (vcp->vc_tdata == NULL)
                return;
        SMB_TRAN_DISCONNECT(vcp, td);
        SMB_TRAN_DONE(vcp, td);
        vcp->vc_tdata = NULL;
}

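/*
 * Declare the connection dead: close the transport and fail all
 * outstanding requests with ENOTCONN.
 */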
static void
smb_iod_dead(struct smbiod *iod)
{
        iod->iod_state = SMBIOD_ST_DEAD;
        smb_iod_closetran(iod);
        smb_iod_invrq(iod);
}

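/*
 * (Re)establish the connection: create, bind and connect the transport,
 * then run the SMB negotiate and session setup exchanges.
 */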
static int
smb_iod_connect(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;
        struct thread *td = iod->iod_td;
        int error;

        SMBIODEBUG("%d\n", iod->iod_state);
        switch(iod->iod_state) {
            case SMBIOD_ST_VCACTIVE:
                SMBERROR("called for already opened connection\n");
                return EISCONN;
            case SMBIOD_ST_DEAD:
                return ENOTCONN;        /* XXX: last error code ? */
            default:
                break;
        }
        vcp->vc_genid++;
        error = 0;
        itry {
                ithrow(SMB_TRAN_CREATE(vcp, td));
                SMBIODEBUG("tcreate\n");
                if (vcp->vc_laddr) {
                        ithrow(SMB_TRAN_BIND(vcp, vcp->vc_laddr, td));
                }
                SMBIODEBUG("tbind\n");
                ithrow(SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, td));
                SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
                iod->iod_state = SMBIOD_ST_TRANACTIVE;
                SMBIODEBUG("tconnect\n");
/*              vcp->vc_mid = 0;*/
                ithrow(smb_smb_negotiate(vcp, &iod->iod_scred));
                SMBIODEBUG("snegotiate\n");
                ithrow(smb_smb_ssnsetup(vcp, &iod->iod_scred));
                iod->iod_state = SMBIOD_ST_VCACTIVE;
                SMBIODEBUG("completed\n");
                smb_iod_invrq(iod);
        } icatch(error) {
                smb_iod_dead(iod);
        } ifinally {
        } iendtry;
        return error;
}

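/*
 * Gracefully close the session, if it is still active, and shut down
 * the transport.
 */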
static int
smb_iod_disconnect(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;

        SMBIODEBUG("\n");
        if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
                smb_smb_ssnclose(vcp, &iod->iod_scred);
                iod->iod_state = SMBIOD_ST_TRANACTIVE;
        }
        vcp->vc_smbuid = SMB_UID_UNKNOWN;
        smb_iod_closetran(iod);
        iod->iod_state = SMBIOD_ST_NOTCONN;
        return 0;
}

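/*
 * (Re)connect the given share, reconnecting the whole virtual circuit
 * first if it went dead; waiters on ss_vcgenid are woken up when done.
 */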
static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
        int error;

        if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
                if (iod->iod_state != SMBIOD_ST_DEAD)
                        return ENOTCONN;
                iod->iod_state = SMBIOD_ST_RECONNECT;
                error = smb_iod_connect(iod);
                if (error)
                        return error;
        }
        SMBIODEBUG("tree reconnect\n");
        SMBS_ST_LOCK(ssp);
        ssp->ss_flags |= SMBS_RECONNECTING;
        SMBS_ST_UNLOCK(ssp);
        error = smb_smb_treeconnect(ssp, &iod->iod_scred);
        SMBS_ST_LOCK(ssp);
        ssp->ss_flags &= ~SMBS_RECONNECTING;
        SMBS_ST_UNLOCK(ssp);
        wakeup(&ssp->ss_vcgenid);
        return error;
}

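/*
 * Try to transmit a single request. On the first attempt the TID/UID
 * header fields are filled in; after more than five failed attempts
 * the request is failed with SMBR_RESTART set and ENOTCONN is returned.
 */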
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
        struct thread *td = iod->iod_td;
        struct smb_vc *vcp = iod->iod_vc;
        struct smb_share *ssp = rqp->sr_share;
        struct mbuf *m;
        int error;

        SMBIODEBUG("iod_state = %d\n", iod->iod_state);
        switch (iod->iod_state) {
            case SMBIOD_ST_NOTCONN:
                smb_iod_rqprocessed(rqp, ENOTCONN);
                return 0;
            case SMBIOD_ST_DEAD:
                iod->iod_state = SMBIOD_ST_RECONNECT;
                return 0;
            case SMBIOD_ST_RECONNECT:
                return 0;
            default:
                break;
        }
        if (rqp->sr_sendcnt == 0) {
#ifdef movedtoanotherplace
                if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
                        return 0;
#endif
                *rqp->sr_rqtid = htole16(ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
                *rqp->sr_rquid = htole16(vcp ? vcp->vc_smbuid : 0);
                mb_fixhdr(&rqp->sr_rq);
        }
        if (rqp->sr_sendcnt++ > 5) {
                rqp->sr_flags |= SMBR_RESTART;
                smb_iod_rqprocessed(rqp, rqp->sr_lerror);
                /*
                 * If all attempts to send a request failed, then
                 * something is seriously hosed.
                 */
                return ENOTCONN;
        }
        SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
        m_dumpm(rqp->sr_rq.mb_top);
        m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_TRYWAIT);
        error = rqp->sr_lerror = m ? SMB_TRAN_SEND(vcp, m, td) : ENOBUFS;
        if (error == 0) {
                getnanotime(&rqp->sr_timesent);
                iod->iod_lastrqsent = rqp->sr_timesent;
                rqp->sr_flags |= SMBR_SENT;
                rqp->sr_state = SMBRQ_SENT;
                return 0;
        }
        /*
         * Check for fatal errors
         */
        if (SMB_TRAN_FATAL(vcp, error)) {
                /*
                 * No further attempts should be made
                 */
                return ENOTCONN;
        }
        if (smb_rq_intr(rqp))
                smb_iod_rqprocessed(rqp, EINTR);
        return 0;
}

/*
 * Process incoming packets
 */
static int
smb_iod_recvall(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;
        struct thread *td = iod->iod_td;
        struct smb_rq *rqp;
        struct mbuf *m;
        u_char *hp;
        u_short mid;
        int error;

        switch (iod->iod_state) {
            case SMBIOD_ST_NOTCONN:
            case SMBIOD_ST_DEAD:
            case SMBIOD_ST_RECONNECT:
                return 0;
            default:
                break;
        }
        for (;;) {
                m = NULL;
                error = SMB_TRAN_RECV(vcp, &m, td);
                if (error == EWOULDBLOCK)
                        break;
                if (SMB_TRAN_FATAL(vcp, error)) {
                        smb_iod_dead(iod);
                        break;
                }
                if (error)
                        break;
                if (m == NULL) {
                        SMBERROR("transport returned NULL mbuf without error\n");
                        error = EPIPE;
                        continue;
                }
                m = m_pullup(m, SMB_HDRLEN);
                if (m == NULL)
                        continue;       /* wait for a good packet */
                /*
                 * We now have a complete, but possibly invalid, SMB
                 * packet. Be careful while parsing it.
                 */
                m_dumpm(m);
                hp = mtod(m, u_char*);
                if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
                        m_freem(m);
                        continue;
                }
                mid = SMB_HDRMID(hp);
                SMBSDEBUG("mid %04x\n", (u_int)mid);
                SMB_IOD_RQLOCK(iod);
                TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
                        if (rqp->sr_mid != mid)
                                continue;
                        SMBRQ_SLOCK(rqp);
                        if (rqp->sr_rp.md_top == NULL) {
                                md_initm(&rqp->sr_rp, m);
                        } else {
                                if (rqp->sr_flags & SMBR_MULTIPACKET) {
                                        md_append_record(&rqp->sr_rp, m);
                                } else {
                                        SMBRQ_SUNLOCK(rqp);
                                        SMBERROR("duplicate response %d (ignored)\n", mid);
                                        break;
                                }
                        }
                        SMBRQ_SUNLOCK(rqp);
                        smb_iod_rqprocessed(rqp, 0);
                        break;
                }
                SMB_IOD_RQUNLOCK(iod);
                if (rqp == NULL) {
                        SMBERROR("drop resp with mid %d\n", (u_int)mid);
/*                      smb_printrqlist(vcp);*/
                        m_freem(m);
                }
        }
        /*
         * check for interrupts
         */
        SMB_IOD_RQLOCK(iod);
        TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
                if (smb_td_intr(rqp->sr_cred->scr_td)) {
                        smb_iod_rqprocessed(rqp, EINTR);
                }
        }
        SMB_IOD_RQUNLOCK(iod);
        return 0;
}

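/*
 * Post an event to the iod event queue. With SMBIOD_EV_SYNC set the
 * caller sleeps until the iod thread has processed the event and
 * returns its error code.
 */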
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
        struct smbiod_event *evp;
        int error;

        SMBIODEBUG("\n");
        evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
        evp->ev_type = event;
        evp->ev_ident = ident;
        SMB_IOD_EVLOCK(iod);
        STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
        if ((event & SMBIOD_EV_SYNC) == 0) {
                SMB_IOD_EVUNLOCK(iod);
                smb_iod_wakeup(iod);
                return 0;
        }
        smb_iod_wakeup(iod);
        msleep(evp, SMB_IOD_EVLOCKPTR(iod), PWAIT | PDROP, "90evw", 0);
        error = evp->ev_error;
        free(evp, M_SMBIOD);
        return error;
}

/*
 * Place a request in the queue.
 * Requests from smbiod itself have a high priority.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
        struct smb_vc *vcp = rqp->sr_vc;
        struct smbiod *iod = vcp->vc_iod;
        int error;

        SMBIODEBUG("\n");
        if (rqp->sr_cred->scr_td->td_proc == iod->iod_p) {
                rqp->sr_flags |= SMBR_INTERNAL;
                SMB_IOD_RQLOCK(iod);
                TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
                SMB_IOD_RQUNLOCK(iod);
                for (;;) {
                        if (smb_iod_sendrq(iod, rqp) != 0) {
                                smb_iod_dead(iod);
                                break;
                        }
                        /*
                         * we don't need to lock state field here
                         */
                        if (rqp->sr_state != SMBRQ_NOTSENT)
                                break;
                        tsleep(&iod->iod_flags, PWAIT, "90sndw", hz);
                }
                if (rqp->sr_lerror)
                        smb_iod_removerq(rqp);
                return rqp->sr_lerror;
        }

        switch (iod->iod_state) {
            case SMBIOD_ST_NOTCONN:
                return ENOTCONN;
            case SMBIOD_ST_DEAD:
                error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
                if (error)
                        return error;
                return EXDEV;
            default:
                break;
        }

        SMB_IOD_RQLOCK(iod);
        for (;;) {
                if (vcp->vc_maxmux == 0) {
                        SMBERROR("maxmux == 0\n");
                        break;
                }
                if (iod->iod_muxcnt < vcp->vc_maxmux)
                        break;
                iod->iod_muxwant++;
                msleep(&iod->iod_muxwant, SMB_IOD_RQLOCKPTR(iod),
                    PWAIT, "90mux", 0);
        }
        iod->iod_muxcnt++;
        TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
        SMB_IOD_RQUNLOCK(iod);
        smb_iod_wakeup(iod);
        return 0;
}

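/*
 * Remove a request from the queue. Ordinary requests also release a
 * multiplex slot and wake up anybody waiting for one.
 */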
int
smb_iod_removerq(struct smb_rq *rqp)
{
        struct smb_vc *vcp = rqp->sr_vc;
        struct smbiod *iod = vcp->vc_iod;

        SMBIODEBUG("\n");
        if (rqp->sr_flags & SMBR_INTERNAL) {
                SMB_IOD_RQLOCK(iod);
                TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
                SMB_IOD_RQUNLOCK(iod);
                return 0;
        }
        SMB_IOD_RQLOCK(iod);
        while (rqp->sr_flags & SMBR_XLOCK) {
                rqp->sr_flags |= SMBR_XLOCKWANT;
                msleep(rqp, SMB_IOD_RQLOCKPTR(iod), PWAIT, "90xrm", 0);
        }
        TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
        iod->iod_muxcnt--;
        if (iod->iod_muxwant) {
                iod->iod_muxwant--;
                wakeup(&iod->iod_muxwant);
        }
        SMB_IOD_RQUNLOCK(iod);
        return 0;
}

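/*
 * Wait for a request to complete. Internal requests are pumped from
 * here, since the iod thread itself is the caller in that case.
 */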
int
smb_iod_waitrq(struct smb_rq *rqp)
{
        struct smbiod *iod = rqp->sr_vc->vc_iod;
        int error;

        SMBIODEBUG("\n");
        if (rqp->sr_flags & SMBR_INTERNAL) {
                for (;;) {
                        smb_iod_sendall(iod);
                        smb_iod_recvall(iod);
                        if (rqp->sr_rpgen != rqp->sr_rplast)
                                break;
                        tsleep(&iod->iod_flags, PWAIT, "90irq", hz);
                }
                smb_iod_removerq(rqp);
                return rqp->sr_lerror;
        }
        SMBRQ_SLOCK(rqp);
        if (rqp->sr_rpgen == rqp->sr_rplast)
                msleep(&rqp->sr_state, SMBRQ_SLOCKPTR(rqp), PWAIT, "90wrq", 0);
        rqp->sr_rplast++;
        SMBRQ_SUNLOCK(rqp);
        error = rqp->sr_lerror;
        if (rqp->sr_flags & SMBR_MULTIPACKET) {
                /*
                 * If the request should stay in the list, reinsert it
                 * at the end of the queue so other waiters get a chance
                 * to run.
                 */
                SMB_IOD_RQLOCK(iod);
                TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
                TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
                SMB_IOD_RQUNLOCK(iod);
        } else
                smb_iod_removerq(rqp);
        return error;
}

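/*
 * Walk the request list: transmit requests that are not yet sent and
 * time out those that have been waiting longer than twice the
 * transport timeout.
 */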
static int
smb_iod_sendall(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;
        struct smb_rq *rqp;
        struct timespec ts, tstimeout;
        int herror;

        herror = 0;
        /*
         * Loop through the list of requests and send them if possible
         */
        SMB_IOD_RQLOCK(iod);
        TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
                switch (rqp->sr_state) {
                    case SMBRQ_NOTSENT:
                        rqp->sr_flags |= SMBR_XLOCK;
                        SMB_IOD_RQUNLOCK(iod);
                        herror = smb_iod_sendrq(iod, rqp);
                        SMB_IOD_RQLOCK(iod);
                        rqp->sr_flags &= ~SMBR_XLOCK;
                        if (rqp->sr_flags & SMBR_XLOCKWANT) {
                                rqp->sr_flags &= ~SMBR_XLOCKWANT;
                                wakeup(rqp);
                        }
                        break;
                    case SMBRQ_SENT:
                        SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout);
                        timespecadd(&tstimeout, &tstimeout);
                        getnanotime(&ts);
                        timespecsub(&ts, &tstimeout);
                        if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
                                smb_iod_rqprocessed(rqp, ETIMEDOUT);
                        }
                        break;
                    default:
                        break;
                }
                if (herror)
                        break;
        }
        SMB_IOD_RQUNLOCK(iod);
        if (herror == ENOTCONN)
                smb_iod_dead(iod);
        return 0;
}

/*
 * "main" function for smbiod daemon
 */
static __inline void
smb_iod_main(struct smbiod *iod)
{
/*      struct smb_vc *vcp = iod->iod_vc;*/
        struct smbiod_event *evp;
/*      struct timespec tsnow;*/
        int error;

        SMBIODEBUG("\n");
        error = 0;

        /*
         * Check all interesting events
         */
        for (;;) {
                SMB_IOD_EVLOCK(iod);
                evp = STAILQ_FIRST(&iod->iod_evlist);
                if (evp == NULL) {
                        SMB_IOD_EVUNLOCK(iod);
                        break;
                }
                STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
                evp->ev_type |= SMBIOD_EV_PROCESSING;
                SMB_IOD_EVUNLOCK(iod);
                switch (evp->ev_type & SMBIOD_EV_MASK) {
                    case SMBIOD_EV_CONNECT:
                        iod->iod_state = SMBIOD_ST_RECONNECT;
                        evp->ev_error = smb_iod_connect(iod);
                        break;
                    case SMBIOD_EV_DISCONNECT:
                        evp->ev_error = smb_iod_disconnect(iod);
                        break;
                    case SMBIOD_EV_TREECONNECT:
                        evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
                        break;
                    case SMBIOD_EV_SHUTDOWN:
                        iod->iod_flags |= SMBIOD_SHUTDOWN;
                        break;
                    case SMBIOD_EV_NEWRQ:
                        break;
                }
                if (evp->ev_type & SMBIOD_EV_SYNC) {
                        SMB_IOD_EVLOCK(iod);
                        wakeup(evp);
                        SMB_IOD_EVUNLOCK(iod);
                } else
                        free(evp, M_SMBIOD);
        }
#if 0
        if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
                getnanotime(&tsnow);
                timespecsub(&tsnow, &iod->iod_pingtimo);
                if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
                        smb_smb_echo(vcp, &iod->iod_scred);
                }
        }
#endif
        smb_iod_sendall(iod);
        smb_iod_recvall(iod);
        return;
}

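/*
 * Body of the per-connection iod kernel thread: process events and
 * requests until asked to shut down.
 */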
void
smb_iod_thread(void *arg)
{
        struct smbiod *iod = arg;

        mtx_lock(&Giant);
        /*
         * We assume here that the thread structure stays the same for
         * the entire life of the kthread (kproc, to be more precise).
         */
        iod->iod_td = curthread;
        smb_makescred(&iod->iod_scred, iod->iod_td, NULL);
        while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
                smb_iod_main(iod);
                SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
/*              mtx_unlock(&Giant, MTX_DEF);*/
                if (iod->iod_flags & SMBIOD_SHUTDOWN)
                        break;
                tsleep(&iod->iod_flags, PWAIT, "90idle", iod->iod_sleeptimo);
        }
/*      mtx_lock(&Giant, MTX_DEF);*/
        kthread_exit(0);
}

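/*
 * Allocate and initialize an iod for the given virtual circuit and
 * start its kernel thread.
 */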
int
smb_iod_create(struct smb_vc *vcp)
{
        struct smbiod *iod;
        int error;

        iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
        iod->iod_id = smb_iod_next++;
        iod->iod_state = SMBIOD_ST_NOTCONN;
        iod->iod_vc = vcp;
        iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
        iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
        getnanotime(&iod->iod_lastrqsent);
        vcp->vc_iod = iod;
        smb_sl_init(&iod->iod_rqlock, "90rql");
        TAILQ_INIT(&iod->iod_rqlist);
        smb_sl_init(&iod->iod_evlock, "90evl");
        STAILQ_INIT(&iod->iod_evlist);
        error = kthread_create(smb_iod_thread, iod, &iod->iod_p,
            RFNOWAIT, 0, "smbiod%d", iod->iod_id);
        if (error) {
                SMBERROR("can't start smbiod: %d\n", error);
                free(iod, M_SMBIOD);
                return error;
        }
        return 0;
}

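/*
 * Synchronously shut down the iod thread and release its resources.
 */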
int
smb_iod_destroy(struct smbiod *iod)
{
        smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
        smb_sl_destroy(&iod->iod_rqlock);
        smb_sl_destroy(&iod->iod_evlock);
        free(iod, M_SMBIOD);
        return 0;
}

int
smb_iod_init(void)
{
        return 0;
}

int
smb_iod_done(void)
{
        return 0;
}