FreeBSD/Linux Kernel Cross Reference
sys/netsmb/smb_rq.c
1 /* $NetBSD: smb_rq.c,v 1.30 2008/06/24 10:37:19 gmcgarry Exp $ */
2
3 /*
4 * Copyright (c) 2000-2001, Boris Popov
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Boris Popov.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * FreeBSD: src/sys/netsmb/smb_rq.c,v 1.4 2001/12/09 17:48:08 arr Exp
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: smb_rq.c,v 1.30 2008/06/24 10:37:19 gmcgarry Exp $");
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/proc.h>
44 #include <sys/lock.h>
45 #include <sys/sysctl.h>
46 #include <sys/socket.h>
47 #include <sys/socketvar.h>
48 #include <sys/mbuf.h>
49
50 #include <netsmb/smb.h>
51 #include <netsmb/smb_conn.h>
52 #include <netsmb/smb_rq.h>
53 #include <netsmb/smb_subr.h>
54 #include <netsmb/smb_tran.h>
55
56 #ifndef __NetBSD__
57 MODULE_DEPEND(netsmb, libmchain, 1, 1, 1);
58 #endif
59
60 #ifdef __NetBSD__
61 POOL_INIT(smbrq_pool, sizeof(struct smb_rq), 0, 0, 0, "smbrqpl",
62 &pool_allocator_nointr, IPL_NONE);
63 POOL_INIT(smbt2rq_pool, sizeof(struct smb_t2rq), 0, 0, 0, "smbt2pl",
64 &pool_allocator_nointr, IPL_NONE);
65 #endif
66
67 static int smb_rq_init(struct smb_rq *, struct smb_connobj *, u_char,
68 struct smb_cred *);
69 static int smb_rq_getenv(struct smb_connobj *layer,
70 struct smb_vc **vcpp, struct smb_share **sspp);
71 static int smb_rq_new(struct smb_rq *rqp, u_char cmd);
72 static int smb_t2_init(struct smb_t2rq *, struct smb_connobj *, u_short,
73 struct smb_cred *);
74 static int smb_t2_reply(struct smb_t2rq *t2p);
75
#ifndef __NetBSD__
/*
 * Run-time initialization of the request and TRANS2-request pools.
 * On NetBSD the pools are set up statically via POOL_INIT above, so
 * this function is only compiled on other platforms.
 */
int
smb_rqinit(void)
{
	pool_init(&smbrq_pool, sizeof(struct smb_rq), 0, 0, 0,
	    "smbrqpl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&smbt2rq_pool, sizeof(struct smb_t2rq), 0, 0, 0,
	    "smbt2pl", &pool_allocator_nointr, IPL_NONE);
	return (0);
}
#endif
87
88 int
89 smb_rq_alloc(struct smb_connobj *layer, u_char cmd, struct smb_cred *scred,
90 struct smb_rq **rqpp)
91 {
92 struct smb_rq *rqp;
93 int error;
94
95 rqp = pool_get(&smbrq_pool, PR_WAITOK);
96 error = smb_rq_init(rqp, layer, cmd, scred);
97 rqp->sr_flags |= SMBR_ALLOCED;
98 if (error) {
99 smb_rq_done(rqp);
100 return error;
101 }
102 *rqpp = rqp;
103 return 0;
104 }
105
/*
 * Initialize a request: resolve the VC/share behind `layer', verify
 * that `scred' may execute requests on them, assign a fresh multiplex
 * id and per-transport timeout, then build the SMB header for `cmd'.
 */
static int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *layer, u_char cmd,
	struct smb_cred *scred)
{
	int error;
	struct timeval timo;

	bzero(rqp, sizeof(*rqp));
	smb_sl_init(&rqp->sr_slock, "srslock");
	error = smb_rq_getenv(layer, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return error;
	error = smb_vc_access(rqp->sr_vc, scred, SMBM_EXEC);
	if (error)
		return error;
	/* sr_share may legitimately be NULL for VC-level requests. */
	if (rqp->sr_share) {
		error = smb_share_access(rqp->sr_share, scred, SMBM_EXEC);
		if (error)
			return error;
	}
	rqp->sr_cred = scred;
	rqp->sr_mid = smb_vc_nextmid(rqp->sr_vc);
	/* Convert the transport's timeout (seconds) into clock ticks. */
	SMB_TRAN_GETPARAM(rqp->sr_vc, SMBTP_TIMEOUT, &timo);
	rqp->sr_timo = timo.tv_sec * hz;
	return smb_rq_new(rqp, cmd);
}
132
/*
 * (Re)build the SMB request header for command `cmd', discarding any
 * previously assembled request and reply chains.  Leaves the mbchain
 * positioned for the caller to append the word and byte blocks.
 */
static int
smb_rq_new(struct smb_rq *rqp, u_char cmd)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct mbchain *mbp = &rqp->sr_rq;
	int error;

	rqp->sr_sendcnt = 0;
	mb_done(mbp);
	md_done(&rqp->sr_rp);
	error = mb_init(mbp);
	if (error)
		return error;
	mb_put_mem(mbp, SMB_SIGNATURE, SMB_SIGLEN, MB_MSYSTEM);
	mb_put_uint8(mbp, cmd);
	mb_put_uint32le(mbp, 0);		/* DosError */
	mb_put_uint8(mbp, vcp->vc_hflags);
	mb_put_uint16le(mbp, vcp->vc_hflags2);
	mb_put_mem(mbp, NULL, 12, MB_MZERO);	/* reserved header bytes */
	/* TID is not known yet; reserve the slot and patch it at send time. */
	rqp->sr_rqtid = mb_reserve(mbp, sizeof(u_int16_t));
	/*
	 * SMB packet PID is used for lock validation. Besides that,
	 * it's opaque for the server.
	 */
	mb_put_uint16le(mbp, 1 /*rqp->sr_cred->scr_p->p_pid & 0xffff*/);
	/* UID is likewise patched in later. */
	rqp->sr_rquid = mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, rqp->sr_mid);
	return 0;
}
162
163 void
164 smb_rq_done(struct smb_rq *rqp)
165 {
166 mb_done(&rqp->sr_rq);
167 md_done(&rqp->sr_rp);
168 smb_sl_destroy(&rqp->sr_slock);
169 if (rqp->sr_flags & SMBR_ALLOCED)
170 pool_put(&smbrq_pool, rqp);
171 }
172
173 /*
174 * Simple request-reply exchange
175 */
176 int
177 smb_rq_simple(struct smb_rq *rqp)
178 {
179 int error, i;
180
181 for (i = 0; i < SMB_MAXRCN; i++) {
182 rqp->sr_flags &= ~SMBR_RESTART;
183 rqp->sr_state = SMBRQ_NOTSENT;
184 error = smb_rq_enqueue(rqp);
185 if (error)
186 return error;
187 error = smb_rq_reply(rqp);
188 if (!error)
189 break;
190 if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) != SMBR_RESTART)
191 break;
192 }
193 return error;
194 }
195
/*
 * Hand the request over to the iod for transmission.  Requests without
 * a share, or issued with the iod's own credentials, bypass the share
 * state machine; otherwise wait out any reconnect in progress and kick
 * off a tree-connect when the share is connected but no longer valid.
 */
int
smb_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_share *ssp = rqp->sr_share;
	int error;

	if (ssp == NULL || rqp->sr_cred == &rqp->sr_vc->vc_iod->iod_scred) {
		return smb_iod_addrq(rqp);
	}
	for (;;) {
		SMBS_ST_LOCK(ssp);
		if (ssp->ss_flags & SMBS_RECONNECTING) {
			SMBS_ST_UNLOCK(ssp);
			/*
			 * Reconnect in progress: sleep (at most one second)
			 * on the generation id and re-check.  PNORELOCK
			 * leaves the interlock dropped; EWOULDBLOCK merely
			 * means the timeout fired, so keep retrying.
			 */
			error = mtsleep(&ssp->ss_vcgenid,
			    PWAIT | PCATCH | PNORELOCK,
			    "smbtrcn", hz, SMBS_ST_LOCKPTR(ssp));
			if (error && error != EWOULDBLOCK)
				return (error);
			continue;
		}
		if (smb_share_valid(ssp) || (ssp->ss_flags & SMBS_CONNECTED) == 0) {
			SMBS_ST_UNLOCK(ssp);
		} else {
			/* Connected but tree gone: request a reconnect. */
			SMBS_ST_UNLOCK(ssp);
			error = smb_iod_request(rqp->sr_vc->vc_iod,
			    SMBIOD_EV_TREECONNECT | SMBIOD_EV_SYNC, ssp);
			if (error)
				return error;
		}
		error = smb_iod_addrq(rqp);
		/* EXDEV: share generation changed underneath us — retry. */
		if (error != EXDEV)
			break;
	}
	return error;
}
231
232 void
233 smb_rq_wstart(struct smb_rq *rqp)
234 {
235 rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof(u_int8_t));
236 rqp->sr_rq.mb_count = 0;
237 }
238
239 void
240 smb_rq_wend(struct smb_rq *rqp)
241 {
242 #ifdef DIAGNOSTIC
243 if (rqp->sr_wcount == NULL)
244 panic("smb_rq_wend: no wcount");
245 if (rqp->sr_rq.mb_count & 1)
246 panic("smb_rq_wend: odd word count");
247 #endif
248 rqp->sr_wcount[0] = rqp->sr_rq.mb_count / 2;
249 }
250
251 void
252 smb_rq_bstart(struct smb_rq *rqp)
253 {
254 rqp->sr_bcount = mb_reserve(&rqp->sr_rq, sizeof(u_int16_t));
255 rqp->sr_rq.mb_count = 0;
256 }
257
258 void
259 smb_rq_bend(struct smb_rq *rqp)
260 {
261 u_int16_t bcnt = rqp->sr_rq.mb_count;
262
263 #ifdef DIAGNOSTIC
264 if (rqp->sr_bcount == NULL)
265 panic("smb_rq_bend: no bcount");
266 if (rqp->sr_rq.mb_count > 0xffff)
267 panic("smb_rq_bend: byte count too large (%d)", bcnt);
268 #endif
269 SMBRQ_PUTLE16(rqp->sr_bcount, bcnt);
270 }
271
272 int
273 smb_rq_intr(struct smb_rq *rqp)
274 {
275 struct lwp *l = rqp->sr_cred->scr_l;
276
277 if (rqp->sr_flags & SMBR_INTR)
278 return EINTR;
279 return smb_proc_intr(l);
280 }
281
282 int
283 smb_rq_getrequest(struct smb_rq *rqp, struct mbchain **mbpp)
284 {
285 *mbpp = &rqp->sr_rq;
286 return 0;
287 }
288
289 int
290 smb_rq_getreply(struct smb_rq *rqp, struct mdchain **mbpp)
291 {
292 *mbpp = &rqp->sr_rp;
293 return 0;
294 }
295
/*
 * Resolve the VC and (optionally) the share behind a connection object.
 * For a share layer the parent VC is found recursively.  Either output
 * pointer may be NULL when the caller does not need it.  Returns EINVAL
 * for orphaned ("zombie") objects or an unexpected layer level.
 */
static int
smb_rq_getenv(struct smb_connobj *layer,
	struct smb_vc **vcpp, struct smb_share **sspp)
{
	struct smb_vc *vcp = NULL;
	struct smb_share *ssp = NULL;
	struct smb_connobj *cp;
	int error = 0;

	switch (layer->co_level) {
	case SMBL_VC:
		vcp = CPTOVC(layer);
		if (layer->co_parent == NULL) {
			SMBERROR(("zombie VC %s\n", vcp->vc_srvname));
			error = EINVAL;
			break;
		}
		break;
	case SMBL_SHARE:
		ssp = CPTOSS(layer);
		cp = layer->co_parent;
		if (cp == NULL) {
			SMBERROR(("zombie share %s\n", ssp->ss_name));
			error = EINVAL;
			break;
		}
		/* Walk up to the parent to obtain the owning VC. */
		error = smb_rq_getenv(cp, &vcp, NULL);
		if (error)
			break;
		break;
	default:
		SMBERROR(("invalid layer %d passed\n", layer->co_level));
		error = EINVAL;
	}
	/* Outputs are filled even on error (possibly with NULLs). */
	if (vcpp)
		*vcpp = vcp;
	if (sspp)
		*sspp = ssp;
	return error;
}
336
337 /*
338 * Wait for reply on the request
339 */
340 int
341 smb_rq_reply(struct smb_rq *rqp)
342 {
343 struct mdchain *mdp = &rqp->sr_rp;
344 int error;
345 u_int8_t errclass;
346 u_int16_t serror;
347
348 error = smb_iod_waitrq(rqp);
349 if (error)
350 return error;
351 error = md_get_uint32(mdp, NULL);
352 if (error)
353 return error;
354 (void) md_get_uint8(mdp, NULL);
355 if (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_ERR_STATUS) {
356 (void) md_get_uint32(mdp, NULL); /* XXX ignored? */
357 } else {
358 (void) md_get_uint8(mdp, &errclass);
359 (void) md_get_uint8(mdp, NULL);
360 error = md_get_uint16le(mdp, &serror);
361 if (!error)
362 error = smb_maperror(errclass, serror);
363 }
364 (void) md_get_uint8(mdp, NULL); /* rpflags */
365 (void) md_get_uint16(mdp, NULL); /* rpflags2 */
366
367 (void) md_get_uint32(mdp, NULL);
368 (void) md_get_uint32(mdp, NULL);
369 (void) md_get_uint32(mdp, NULL);
370
371 (void) md_get_uint16le(mdp, &rqp->sr_rptid);
372 (void) md_get_uint16le(mdp, &rqp->sr_rppid);
373 (void) md_get_uint16le(mdp, &rqp->sr_rpuid);
374 (void) md_get_uint16le(mdp, &rqp->sr_rpmid);
375
376 SMBSDEBUG(("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n",
377 rqp->sr_rpmid, rqp->sr_rppid, rqp->sr_rpuid, rqp->sr_rptid,
378 errclass, serror));
379 return (error);
380 }
381
382 void
383 smb_rq_setcallback(struct smb_rq *rqp, void (*recvcallb)(void *), void *arg)
384 {
385 SMBRQ_SLOCK(rqp);
386 rqp->sr_recvcallback = recvcallb;
387 rqp->sr_recvarg = arg;
388 SMBRQ_SUNLOCK(rqp);
389 }
390
391 #define ALIGN4(a) (((a) + 3) & ~3)
392
393 /*
394 * TRANS2 request implementation
395 */
396 int
397 smb_t2_alloc(struct smb_connobj *layer, u_short setup, struct smb_cred *scred,
398 struct smb_t2rq **t2pp)
399 {
400 struct smb_t2rq *t2p;
401 int error;
402
403 t2p = pool_get(&smbt2rq_pool, PR_WAITOK);
404 error = smb_t2_init(t2p, layer, setup, scred);
405 t2p->t2_flags |= SMBT2_ALLOCED;
406 if (error) {
407 smb_t2_done(t2p);
408 return error;
409 }
410 *t2pp = t2p;
411 return 0;
412 }
413
414 static int
415 smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, u_short setup,
416 struct smb_cred *scred)
417 {
418 int error;
419
420 bzero(t2p, sizeof(*t2p));
421 t2p->t2_source = source;
422 t2p->t2_setupcount = 1;
423 t2p->t2_setupdata = t2p->t2_setup;
424 t2p->t2_setup[0] = setup;
425 t2p->t2_fid = 0xffff;
426 t2p->t2_cred = scred;
427 error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
428 if (error)
429 return error;
430 return 0;
431 }
432
433 void
434 smb_t2_done(struct smb_t2rq *t2p)
435 {
436 mb_done(&t2p->t2_tparam);
437 mb_done(&t2p->t2_tdata);
438 md_done(&t2p->t2_rparam);
439 md_done(&t2p->t2_rdata);
440 if (t2p->t2_flags & SMBT2_ALLOCED)
441 pool_put(&smbt2rq_pool, t2p);
442 }
443
/*
 * Detach `count' bytes starting at `offset' from the reply packet
 * `mtop' and append them to the chain being collected in `mdp' (used
 * for both the parameter and the data section of a TRANS2 reply).
 *
 * NOTE(review): only the last mbuf's length is trimmed below, which
 * assumes the excess (len - count) lies entirely within that final
 * mbuf — TODO confirm; m_adj() would cover the multi-mbuf case.
 */
static int
smb_t2_placedata(struct mbuf *mtop, u_int16_t offset, u_int16_t count,
	struct mdchain *mdp)
{
	struct mbuf *m, *m0;
	int len;

	m0 = m_split(mtop, offset, M_WAIT);
	if (m0 == NULL)
		return EBADRPC;
	/* Sum the length of the split-off chain... */
	for(len = 0, m = m0; m->m_next; m = m->m_next)
		len += m->m_len;
	len += m->m_len;
	/* ...and chop everything beyond `count' off the last mbuf. */
	m->m_len -= len - count;
	if (mdp->md_top == NULL) {
		md_initm(mdp, m0);
	} else
		m_cat(mdp->md_top, m0);
	return 0;
}
464
/*
 * Collect the reply to a TRANS/TRANS2 request, which may arrive split
 * over several response packets, and sort the parameter and data
 * sections into t2_rparam/t2_rdata.  Misordered fragments (displacement
 * not matching what we already received) are rejected with EINVAL.
 */
static int
smb_t2_reply(struct smb_t2rq *t2p)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = t2p->t2_rq;
	int error, totpgot, totdgot;
	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int16_t tmp, bc, dcount;
	u_int8_t wc;

	error = smb_rq_reply(rqp);
	if (error)
		return error;
	if ((t2p->t2_flags & SMBT2_ALLSENT) == 0) {
		/*
		 * this is an interim response, ignore it.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		return 0;
	}
	/*
	 * Now we have to get all subseqent responses. The CIFS specification
	 * says that they can be misordered which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		m_dumpm(mdp->md_top);
		if ((error = md_get_uint8(mdp, &wc)) != 0)
			break;
		/* A TRANS2 response carries at least 10 parameter words. */
		if (wc < 10) {
			error = ENOENT;
			break;
		}
		if ((error = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		/* The server may lower the totals in later responses. */
		if (totpcount > tmp)
			totpcount = tmp;
		md_get_uint16le(mdp, &tmp);
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBERROR(("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot));
			error = EINVAL;
			break;
		}
		if ((error = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBERROR(("Can't handle misordered data\n"));
			error = EINVAL;
			break;
		}
		/* Skip the setup words and read the byte count. */
		md_get_uint8(mdp, &wc);
		md_get_uint8(mdp, NULL);
		tmp = wc;
		while (tmp--)
			md_get_uint16(mdp, NULL);
		if ((error = md_get_uint16le(mdp, &bc)) != 0)
			break;
		/* tmp = SMB_HDRLEN + 1 + 10 * 2 + 2 * wc + 2;*/
		if (dcount) {
			error = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &t2p->t2_rdata);
			if (error)
				break;
		}
		if (pcount) {
			error = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &t2p->t2_rparam);
			if (error)
				break;
		}
		totpgot += pcount;
		totdgot += dcount;
		if (totpgot >= totpcount && totdgot >= totdcount) {
			error = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		error = smb_rq_reply(rqp);
		if (error)
			break;
	}
	return error;
}
569
570 /*
571 * Perform a full round of TRANS2 request
572 */
573 static int
574 smb_t2_request_int(struct smb_t2rq *t2p)
575 {
576 struct smb_vc *vcp = t2p->t2_vc;
577 struct smb_cred *scred = t2p->t2_cred;
578 struct mbchain *mbp;
579 struct mdchain *mdp, mbparam, mbdata;
580 struct mbuf *m;
581 struct smb_rq *rqp;
582 int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
583 int error, doff, poff, txdcount, txpcount, nmlen;
584
585 m = t2p->t2_tparam.mb_top;
586 if (m) {
587 md_initm(&mbparam, m); /* do not free it! */
588 totpcount = m_fixhdr(m);
589 if (totpcount > 0xffff) /* maxvalue for u_short */
590 return EINVAL;
591 } else
592 totpcount = 0;
593 m = t2p->t2_tdata.mb_top;
594 if (m) {
595 md_initm(&mbdata, m); /* do not free it! */
596 totdcount = m_fixhdr(m);
597 if (totdcount > 0xffff)
598 return EINVAL;
599 } else
600 totdcount = 0;
601 leftdcount = totdcount;
602 leftpcount = totpcount;
603 txmax = vcp->vc_txmax;
604 error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
605 SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
606 if (error)
607 return error;
608 rqp->sr_flags |= SMBR_MULTIPACKET;
609 t2p->t2_rq = rqp;
610 mbp = &rqp->sr_rq;
611 smb_rq_wstart(rqp);
612 mb_put_uint16le(mbp, totpcount);
613 mb_put_uint16le(mbp, totdcount);
614 mb_put_uint16le(mbp, t2p->t2_maxpcount);
615 mb_put_uint16le(mbp, t2p->t2_maxdcount);
616 mb_put_uint8(mbp, t2p->t2_maxscount);
617 mb_put_uint8(mbp, 0); /* reserved */
618 mb_put_uint16le(mbp, 0); /* flags */
619 mb_put_uint32le(mbp, 0); /* Timeout */
620 mb_put_uint16le(mbp, 0); /* reserved 2 */
621 len = mb_fixhdr(mbp);
622 /*
623 * now we have known packet size as
624 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + strlen(name) + 1),
625 * and need to decide which parts should go into the first request
626 */
627 nmlen = t2p->t_name ? strlen(t2p->t_name) : 0;
628 len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmlen + 1);
629 if (len + leftpcount > txmax) {
630 txpcount = min(leftpcount, txmax - len);
631 poff = len;
632 txdcount = 0;
633 doff = 0;
634 } else {
635 txpcount = leftpcount;
636 poff = txpcount ? len : 0;
637 len = ALIGN4(len + txpcount);
638 txdcount = min(leftdcount, txmax - len);
639 doff = txdcount ? len : 0;
640 }
641 leftpcount -= txpcount;
642 leftdcount -= txdcount;
643 mb_put_uint16le(mbp, txpcount);
644 mb_put_uint16le(mbp, poff);
645 mb_put_uint16le(mbp, txdcount);
646 mb_put_uint16le(mbp, doff);
647 mb_put_uint8(mbp, t2p->t2_setupcount);
648 mb_put_uint8(mbp, 0);
649 for (i = 0; i < t2p->t2_setupcount; i++)
650 mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
651 smb_rq_wend(rqp);
652 smb_rq_bstart(rqp);
653 /* TDUNICODE */
654 if (t2p->t_name)
655 mb_put_mem(mbp, t2p->t_name, nmlen, MB_MSYSTEM);
656 mb_put_uint8(mbp, 0); /* terminating zero */
657 len = mb_fixhdr(mbp);
658 if (txpcount) {
659 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
660 error = md_get_mbuf(&mbparam, txpcount, &m);
661 SMBSDEBUG(("%d:%d:%d\n", error, txpcount, txmax));
662 if (error)
663 goto freerq;
664 mb_put_mbuf(mbp, m);
665 }
666 len = mb_fixhdr(mbp);
667 if (txdcount) {
668 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
669 error = md_get_mbuf(&mbdata, txdcount, &m);
670 if (error)
671 goto freerq;
672 mb_put_mbuf(mbp, m);
673 }
674 smb_rq_bend(rqp); /* incredible, but thats it... */
675 error = smb_rq_enqueue(rqp);
676 if (error)
677 goto freerq;
678 if (leftpcount == 0 && leftdcount == 0)
679 t2p->t2_flags |= SMBT2_ALLSENT;
680 error = smb_t2_reply(t2p);
681 if (error)
682 goto bad;
683 while (leftpcount || leftdcount) {
684 error = smb_rq_new(rqp, t2p->t_name ?
685 SMB_COM_TRANSACTION_SECONDARY : SMB_COM_TRANSACTION2_SECONDARY);
686 if (error)
687 goto bad;
688 mbp = &rqp->sr_rq;
689 smb_rq_wstart(rqp);
690 mb_put_uint16le(mbp, totpcount);
691 mb_put_uint16le(mbp, totdcount);
692 len = mb_fixhdr(mbp);
693 /*
694 * now we have known packet size as
695 * ALIGN4(len + 7 * 2 + 2) for T2 request, and -2 for T one,
696 * and need to decide which parts should go into request
697 */
698 len = ALIGN4(len + 6 * 2 + 2);
699 if (t2p->t_name == NULL)
700 len += 2;
701 if (len + leftpcount > txmax) {
702 txpcount = min(leftpcount, txmax - len);
703 poff = len;
704 txdcount = 0;
705 doff = 0;
706 } else {
707 txpcount = leftpcount;
708 poff = txpcount ? len : 0;
709 len = ALIGN4(len + txpcount);
710 txdcount = min(leftdcount, txmax - len);
711 doff = txdcount ? len : 0;
712 }
713 mb_put_uint16le(mbp, txpcount);
714 mb_put_uint16le(mbp, poff);
715 mb_put_uint16le(mbp, totpcount - leftpcount);
716 mb_put_uint16le(mbp, txdcount);
717 mb_put_uint16le(mbp, doff);
718 mb_put_uint16le(mbp, totdcount - leftdcount);
719 leftpcount -= txpcount;
720 leftdcount -= txdcount;
721 if (t2p->t_name == NULL)
722 mb_put_uint16le(mbp, t2p->t2_fid);
723 smb_rq_wend(rqp);
724 smb_rq_bstart(rqp);
725 mb_put_uint8(mbp, 0); /* name */
726 len = mb_fixhdr(mbp);
727 if (txpcount) {
728 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
729 error = md_get_mbuf(&mbparam, txpcount, &m);
730 if (error)
731 goto bad;
732 mb_put_mbuf(mbp, m);
733 }
734 len = mb_fixhdr(mbp);
735 if (txdcount) {
736 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
737 error = md_get_mbuf(&mbdata, txdcount, &m);
738 if (error)
739 goto bad;
740 mb_put_mbuf(mbp, m);
741 }
742 smb_rq_bend(rqp);
743 rqp->sr_state = SMBRQ_NOTSENT;
744 error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_NEWRQ, NULL);
745 if (error)
746 goto bad;
747 } /* while left params or data */
748 t2p->t2_flags |= SMBT2_ALLSENT;
749 mdp = &t2p->t2_rdata;
750 if (mdp->md_top) {
751 m_fixhdr(mdp->md_top);
752 md_initm(mdp, mdp->md_top);
753 }
754 mdp = &t2p->t2_rparam;
755 if (mdp->md_top) {
756 m_fixhdr(mdp->md_top);
757 md_initm(mdp, mdp->md_top);
758 }
759 bad:
760 smb_iod_removerq(rqp);
761 freerq:
762 smb_rq_done(rqp);
763 if (error) {
764 if (rqp->sr_flags & SMBR_RESTART)
765 t2p->t2_flags |= SMBT2_RESTART;
766 md_done(&t2p->t2_rparam);
767 md_done(&t2p->t2_rdata);
768 }
769 return error;
770 }
771
772 int
773 smb_t2_request(struct smb_t2rq *t2p)
774 {
775 int error = EINVAL, i;
776
777 for (i = 0; i < SMB_MAXRCN; i++) {
778 t2p->t2_flags &= ~SMBT2_RESTART;
779 error = smb_t2_request_int(t2p);
780 if (error == 0)
781 break;
782 if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) != SMBT2_RESTART)
783 break;
784 }
785 return error;
786 }
Cache object: c8f6f86cff9abffbfdb9ecb3e83e6481
|