FreeBSD/Linux Kernel Cross Reference
sys/netsmb/smb_rq.c
1 /* $NetBSD: smb_rq.c,v 1.24 2003/06/29 22:32:10 fvdl Exp $ */
2
3 /*
4 * Copyright (c) 2000-2001, Boris Popov
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Boris Popov.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * FreeBSD: src/sys/netsmb/smb_rq.c,v 1.4 2001/12/09 17:48:08 arr Exp
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: smb_rq.c,v 1.24 2003/06/29 22:32:10 fvdl Exp $");
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/proc.h>
44 #include <sys/lock.h>
45 #include <sys/sysctl.h>
46 #include <sys/socket.h>
47 #include <sys/socketvar.h>
48 #include <sys/mbuf.h>
49
50 #include <netsmb/smb.h>
51 #include <netsmb/smb_conn.h>
52 #include <netsmb/smb_rq.h>
53 #include <netsmb/smb_subr.h>
54 #include <netsmb/smb_tran.h>
55
56 #ifndef __NetBSD__
57 MODULE_DEPEND(netsmb, libmchain, 1, 1, 1);
58 #endif
59
60 static struct pool smbrq_pool;
61 static struct pool smbt2rq_pool;
62
63 static int smb_rq_init(struct smb_rq *, struct smb_connobj *, u_char,
64 struct smb_cred *);
65 static int smb_rq_getenv(struct smb_connobj *layer,
66 struct smb_vc **vcpp, struct smb_share **sspp);
67 static int smb_rq_new(struct smb_rq *rqp, u_char cmd);
68 static int smb_t2_init(struct smb_t2rq *, struct smb_connobj *, u_short,
69 struct smb_cred *);
70 static int smb_t2_reply(struct smb_t2rq *t2p);
71
72 int
73 smb_rqinit(void)
74 {
75 pool_init(&smbrq_pool, sizeof(struct smb_rq), 0, 0, 0,
76 "smbrqpl", &pool_allocator_nointr);
77 pool_init(&smbt2rq_pool, sizeof(struct smb_t2rq), 0, 0, 0,
78 "smbt2pl", &pool_allocator_nointr);
79 return (0);
80 }
81
82 int
83 smb_rq_alloc(struct smb_connobj *layer, u_char cmd, struct smb_cred *scred,
84 struct smb_rq **rqpp)
85 {
86 struct smb_rq *rqp;
87 int error;
88
89 rqp = pool_get(&smbrq_pool, PR_WAITOK);
90 error = smb_rq_init(rqp, layer, cmd, scred);
91 rqp->sr_flags |= SMBR_ALLOCED;
92 if (error) {
93 smb_rq_done(rqp);
94 return error;
95 }
96 *rqpp = rqp;
97 return 0;
98 }
99
/*
 * Initialize an already-allocated request: resolve the VC (and share,
 * if any) from the connection object, check that the caller may issue
 * requests on them, pick a fresh multiplex ID and build the SMB
 * request header.  Returns 0 or an errno.  On error the caller is
 * expected to dispose of rqp with smb_rq_done(); sr_slock is always
 * initialized here, so that teardown is safe on every exit path.
 */
static int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *layer, u_char cmd,
	struct smb_cred *scred)
{
	int error;
	struct timeval timo;

	bzero(rqp, sizeof(*rqp));
	smb_sl_init(&rqp->sr_slock, "srslock");
	error = smb_rq_getenv(layer, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return error;
	error = smb_vc_access(rqp->sr_vc, scred, SMBM_EXEC);
	if (error)
		return error;
	if (rqp->sr_share) {
		error = smb_share_access(rqp->sr_share, scred, SMBM_EXEC);
		if (error)
			return error;
	}
	rqp->sr_cred = scred;
	rqp->sr_mid = smb_vc_nextmid(rqp->sr_vc);
	/* Transport supplies its timeout; convert seconds to ticks. */
	SMB_TRAN_GETPARAM(rqp->sr_vc, SMBTP_TIMEOUT, &timo);
	rqp->sr_timo = timo.tv_sec * hz;
	return smb_rq_new(rqp, cmd);
}
126
/*
 * (Re)build the SMB request header for the given command in the
 * request's send buffer.  Any previously accumulated request and
 * reply data is discarded first, which also allows a request to be
 * reused for a follow-up command (see the TRANSACTION secondary
 * requests in smb_t2_request_int()).  Returns 0 or an errno.
 */
static int
smb_rq_new(struct smb_rq *rqp, u_char cmd)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct mbchain *mbp = &rqp->sr_rq;
	int error;

	rqp->sr_sendcnt = 0;
	mb_done(mbp);
	md_done(&rqp->sr_rp);
	error = mb_init(mbp);
	if (error)
		return error;
	mb_put_mem(mbp, SMB_SIGNATURE, SMB_SIGLEN, MB_MSYSTEM);
	mb_put_uint8(mbp, cmd);
	mb_put_uint32le(mbp, 0);		/* DosError */
	mb_put_uint8(mbp, vcp->vc_hflags);
	mb_put_uint16le(mbp, vcp->vc_hflags2);
	mb_put_mem(mbp, NULL, 12, MB_MZERO);	/* 12 reserved bytes, zeroed */
	/* Tree ID slot; reserved here, presumably filled in at send time. */
	rqp->sr_rqtid = mb_reserve(mbp, sizeof(u_int16_t));
	/*
	 * SMB packet PID is used for lock validation. Besides that,
	 * it's opaque for the server.
	 */
	mb_put_uint16le(mbp, 1 /*rqp->sr_cred->scr_p->p_pid & 0xffff*/);
	/* User ID slot; reserved like the TID above. */
	rqp->sr_rquid = mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, rqp->sr_mid);
	return 0;
}
156
/*
 * Release all resources held by a request: the send and reply
 * chains and the slock.  If the request came from the pool
 * (SMBR_ALLOCED, set by smb_rq_alloc()), return it there as well;
 * do not touch rqp after this returns.
 */
void
smb_rq_done(struct smb_rq *rqp)
{
	mb_done(&rqp->sr_rq);
	md_done(&rqp->sr_rp);
	smb_sl_destroy(&rqp->sr_slock);
	if (rqp->sr_flags & SMBR_ALLOCED)
		pool_put(&smbrq_pool, rqp);
}
166
167 /*
168 * Simple request-reply exchange
169 */
170 int
171 smb_rq_simple(struct smb_rq *rqp)
172 {
173 int error, i;
174
175 for (i = 0; i < SMB_MAXRCN; i++) {
176 rqp->sr_flags &= ~SMBR_RESTART;
177 rqp->sr_state = SMBRQ_NOTSENT;
178 error = smb_rq_enqueue(rqp);
179 if (error)
180 return error;
181 error = smb_rq_reply(rqp);
182 if (!error)
183 break;
184 if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) != SMBR_RESTART)
185 break;
186 }
187 return error;
188 }
189
/*
 * Hand the request to the iod for transmission.  Requests without a
 * share, or issued with the iod's own credentials, bypass the share
 * state checks.  Otherwise, wait out any tree reconnect in progress;
 * if the share is connected but no longer valid, ask the iod for a
 * synchronous tree connect first.  EXDEV from smb_iod_addrq()
 * indicates the share state changed underneath us, so retry the
 * whole sequence.  Returns 0 or an errno.
 */
int
smb_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_share *ssp = rqp->sr_share;
	int error;

	if (ssp == NULL || rqp->sr_cred == &rqp->sr_vc->vc_iod->iod_scred) {
		return smb_iod_addrq(rqp);
	}
	for (;;) {
		SMBS_ST_LOCK(ssp);
		if (ssp->ss_flags & SMBS_RECONNECTING) {
			SMBS_ST_UNLOCK(ssp);
			/*
			 * Reconnect in progress: sleep up to hz ticks on the
			 * generation id, then re-check.  EWOULDBLOCK (timeout)
			 * is not an error here.
			 * NOTE(review): the lock is dropped above yet still
			 * passed to ltsleep() as interlock with PNORELOCK —
			 * verify this is benign for this lock type.
			 */
			error = ltsleep(&ssp->ss_vcgenid,
			    PWAIT | PCATCH | PNORELOCK,
			    "smbtrcn", hz, SMBS_ST_LOCKPTR(ssp));
			if (error && error != EWOULDBLOCK)
				return (error);
			continue;
		}
		if (smb_share_valid(ssp) || (ssp->ss_flags & SMBS_CONNECTED) == 0) {
			SMBS_ST_UNLOCK(ssp);
		} else {
			/* Connected but stale: reconnect the tree synchronously. */
			SMBS_ST_UNLOCK(ssp);
			error = smb_iod_request(rqp->sr_vc->vc_iod,
			    SMBIOD_EV_TREECONNECT | SMBIOD_EV_SYNC, ssp);
			if (error)
				return error;
		}
		error = smb_iod_addrq(rqp);
		if (error != EXDEV)
			break;
	}
	return error;
}
225
/*
 * Begin the word-count section of the request: reserve the one-byte
 * word count and reset the running byte counter so smb_rq_wend()
 * can compute how many parameter words were appended.
 */
void
smb_rq_wstart(struct smb_rq *rqp)
{
	rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof(u_int8_t));
	rqp->sr_rq.mb_count = 0;
}
232
/*
 * Close the word-count section: store the number of 16-bit parameter
 * words (bytes appended since smb_rq_wstart(), divided by two) into
 * the slot reserved by smb_rq_wstart().
 */
void
smb_rq_wend(struct smb_rq *rqp)
{
#ifdef DIAGNOSTIC
	if (rqp->sr_wcount == NULL)
		panic("smb_rq_wend: no wcount");
	if (rqp->sr_rq.mb_count & 1)
		panic("smb_rq_wend: odd word count");
#endif
	rqp->sr_wcount[0] = rqp->sr_rq.mb_count / 2;
}
244
/*
 * Begin the byte-count section of the request: reserve the two-byte
 * byte count and reset the running counter so smb_rq_bend() can
 * fill it in.
 */
void
smb_rq_bstart(struct smb_rq *rqp)
{
	rqp->sr_bcount = mb_reserve(&rqp->sr_rq, sizeof(u_int16_t));
	rqp->sr_rq.mb_count = 0;
}
251
252 void
253 smb_rq_bend(struct smb_rq *rqp)
254 {
255 u_int16_t bcnt = rqp->sr_rq.mb_count;
256
257 #ifdef DIAGNOSTIC
258 if (rqp->sr_bcount == NULL)
259 panic("smb_rq_bend: no bcount");
260 if (rqp->sr_rq.mb_count > 0xffff)
261 panic("smb_rq_bend: byte count too large (%d)", bcnt);
262 #endif
263 SMBRQ_PUTLE16(rqp->sr_bcount, bcnt);
264 }
265
266 int
267 smb_rq_intr(struct smb_rq *rqp)
268 {
269 struct proc *p = rqp->sr_cred->scr_p;
270
271 if (rqp->sr_flags & SMBR_INTR)
272 return EINTR;
273 return smb_proc_intr(p);
274 }
275
/*
 * Hand back a pointer to the request's send chain (mbchain).
 * Always succeeds.
 */
int
smb_rq_getrequest(struct smb_rq *rqp, struct mbchain **mbpp)
{
	*mbpp = &rqp->sr_rq;
	return 0;
}
282
/*
 * Hand back a pointer to the request's reply chain (mdchain).
 * Always succeeds.
 */
int
smb_rq_getreply(struct smb_rq *rqp, struct mdchain **mbpp)
{
	*mbpp = &rqp->sr_rp;
	return 0;
}
289
290 static int
291 smb_rq_getenv(struct smb_connobj *layer,
292 struct smb_vc **vcpp, struct smb_share **sspp)
293 {
294 struct smb_vc *vcp = NULL;
295 struct smb_share *ssp = NULL;
296 struct smb_connobj *cp;
297 int error = 0;
298
299 switch (layer->co_level) {
300 case SMBL_VC:
301 vcp = CPTOVC(layer);
302 if (layer->co_parent == NULL) {
303 SMBERROR("zombie VC %s\n", vcp->vc_srvname);
304 error = EINVAL;
305 break;
306 }
307 break;
308 case SMBL_SHARE:
309 ssp = CPTOSS(layer);
310 cp = layer->co_parent;
311 if (cp == NULL) {
312 SMBERROR("zombie share %s\n", ssp->ss_name);
313 error = EINVAL;
314 break;
315 }
316 error = smb_rq_getenv(cp, &vcp, NULL);
317 if (error)
318 break;
319 break;
320 default:
321 SMBERROR("invalid layer %d passed\n", layer->co_level);
322 error = EINVAL;
323 }
324 if (vcpp)
325 *vcpp = vcp;
326 if (sspp)
327 *sspp = ssp;
328 return error;
329 }
330
331 /*
332 * Wait for reply on the request
333 */
334 int
335 smb_rq_reply(struct smb_rq *rqp)
336 {
337 struct mdchain *mdp = &rqp->sr_rp;
338 int error;
339 u_int8_t errclass;
340 u_int16_t serror;
341
342 error = smb_iod_waitrq(rqp);
343 if (error)
344 return error;
345 error = md_get_uint32(mdp, NULL);
346 if (error)
347 return error;
348 (void) md_get_uint8(mdp, NULL);
349 if (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_ERR_STATUS) {
350 (void) md_get_uint32(mdp, NULL); /* XXX ignored? */
351 } else {
352 (void) md_get_uint8(mdp, &errclass);
353 (void) md_get_uint8(mdp, NULL);
354 error = md_get_uint16le(mdp, &serror);
355 if (!error)
356 error = smb_maperror(errclass, serror);
357 }
358 (void) md_get_uint8(mdp, NULL); /* rpflags */
359 (void) md_get_uint16(mdp, NULL); /* rpflags2 */
360
361 (void) md_get_uint32(mdp, NULL);
362 (void) md_get_uint32(mdp, NULL);
363 (void) md_get_uint32(mdp, NULL);
364
365 (void) md_get_uint16le(mdp, &rqp->sr_rptid);
366 (void) md_get_uint16le(mdp, &rqp->sr_rppid);
367 (void) md_get_uint16le(mdp, &rqp->sr_rpuid);
368 (void) md_get_uint16le(mdp, &rqp->sr_rpmid);
369
370 SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n",
371 rqp->sr_rpmid, rqp->sr_rppid, rqp->sr_rpuid, rqp->sr_rptid,
372 errclass, serror);
373 return (error);
374 }
375
376 void
377 smb_rq_setcallback(struct smb_rq *rqp, void (*recvcallb)(void *), void *arg)
378 {
379 SMBRQ_SLOCK(rqp);
380 rqp->sr_recvcallback = recvcallb;
381 rqp->sr_recvarg = arg;
382 SMBRQ_SUNLOCK(rqp);
383 }
384
385 #define ALIGN4(a) (((a) + 3) & ~3)
386
387 /*
388 * TRANS2 request implementation
389 */
390 int
391 smb_t2_alloc(struct smb_connobj *layer, u_short setup, struct smb_cred *scred,
392 struct smb_t2rq **t2pp)
393 {
394 struct smb_t2rq *t2p;
395 int error;
396
397 t2p = pool_get(&smbt2rq_pool, PR_WAITOK);
398 error = smb_t2_init(t2p, layer, setup, scred);
399 t2p->t2_flags |= SMBT2_ALLOCED;
400 if (error) {
401 smb_t2_done(t2p);
402 return error;
403 }
404 *t2pp = t2p;
405 return 0;
406 }
407
408 static int
409 smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, u_short setup,
410 struct smb_cred *scred)
411 {
412 int error;
413
414 bzero(t2p, sizeof(*t2p));
415 t2p->t2_source = source;
416 t2p->t2_setupcount = 1;
417 t2p->t2_setupdata = t2p->t2_setup;
418 t2p->t2_setup[0] = setup;
419 t2p->t2_fid = 0xffff;
420 t2p->t2_cred = scred;
421 error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
422 if (error)
423 return error;
424 return 0;
425 }
426
/*
 * Release the transmit and receive parameter/data chains of a TRANS2
 * request and, if it came from the pool (SMBT2_ALLOCED, set by
 * smb_t2_alloc()), return it there; do not touch t2p afterwards.
 */
void
smb_t2_done(struct smb_t2rq *t2p)
{
	mb_done(&t2p->t2_tparam);
	mb_done(&t2p->t2_tdata);
	md_done(&t2p->t2_rparam);
	md_done(&t2p->t2_rdata);
	if (t2p->t2_flags & SMBT2_ALLOCED)
		pool_put(&smbt2rq_pool, t2p);
}
437
/*
 * Detach `count' bytes starting at `offset' from the packet mtop and
 * append them to the chain mdp.  The split-off tail is trimmed in its
 * last mbuf so exactly `count' bytes remain; anything after the range
 * is discarded with it.  Returns 0, or EBADRPC if m_split() fails.
 * NOTE(review): assumes count <= remaining length after the split —
 * otherwise the m_len adjustment below would grow the mbuf; verify
 * callers guarantee this.
 */
static int
smb_t2_placedata(struct mbuf *mtop, u_int16_t offset, u_int16_t count,
	struct mdchain *mdp)
{
	struct mbuf *m, *m0;
	int len;

	m0 = m_split(mtop, offset, M_WAIT);
	if (m0 == NULL)
		return EBADRPC;
	/* Total length of the split-off chain; m ends at its last mbuf. */
	for(len = 0, m = m0; m->m_next; m = m->m_next)
		len += m->m_len;
	len += m->m_len;
	/* Trim the surplus beyond `count' off the last mbuf. */
	m->m_len -= len - count;
	if (mdp->md_top == NULL) {
		md_initm(mdp, m0);
	} else
		m_cat(mdp->md_top, m0);
	return 0;
}
458
/*
 * Collect the reply of a TRANS/TRANS2 exchange, which may be spread
 * over several response packets: parse each response's parameter
 * block, splice its parameter and data byte ranges into t2_rparam /
 * t2_rdata, and loop until the server's advertised totals have been
 * received.  Interim responses (sent before we have transmitted
 * everything) are discarded.  Returns 0 or an errno.
 */
static int
smb_t2_reply(struct smb_t2rq *t2p)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = t2p->t2_rq;
	int error, totpgot, totdgot;
	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int16_t tmp, bc, dcount;
	u_int8_t wc;

	error = smb_rq_reply(rqp);
	if (error)
		return error;
	if ((t2p->t2_flags & SMBT2_ALLSENT) == 0) {
		/*
		 * this is an interim response, ignore it.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		return 0;
	}
	/*
	 * Now we have to get all subseqent responses. The CIFS specification
	 * says that they can be misordered which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		m_dumpm(mdp->md_top);
		if ((error = md_get_uint8(mdp, &wc)) != 0)
			break;
		/* The parameter block parsed below needs 10 words. */
		if (wc < 10) {
			error = ENOENT;
			break;
		}
		if ((error = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		/* The server may lower the totals in later responses. */
		if (totpcount > tmp)
			totpcount = tmp;
		md_get_uint16le(mdp, &tmp);
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBERROR("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot);
			error = EINVAL;
			break;
		}
		if ((error = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBERROR("Can't handle misordered data\n");
			error = EINVAL;
			break;
		}
		/* Setup count (wc is reused), reserved byte, setup words. */
		md_get_uint8(mdp, &wc);
		md_get_uint8(mdp, NULL);
		tmp = wc;
		while (tmp--)
			md_get_uint16(mdp, NULL);
		if ((error = md_get_uint16le(mdp, &bc)) != 0)
			break;
		/* tmp = SMB_HDRLEN + 1 + 10 * 2 + 2 * wc + 2;*/
		if (dcount) {
			error = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &t2p->t2_rdata);
			if (error)
				break;
		}
		if (pcount) {
			error = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &t2p->t2_rparam);
			if (error)
				break;
		}
		totpgot += pcount;
		totdgot += dcount;
		if (totpgot >= totpcount && totdgot >= totdcount) {
			error = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		error = smb_rq_reply(rqp);
		if (error)
			break;
	}
	return error;
}
563
564 /*
565 * Perform a full round of TRANS2 request
566 */
567 static int
568 smb_t2_request_int(struct smb_t2rq *t2p)
569 {
570 struct smb_vc *vcp = t2p->t2_vc;
571 struct smb_cred *scred = t2p->t2_cred;
572 struct mbchain *mbp;
573 struct mdchain *mdp, mbparam, mbdata;
574 struct mbuf *m;
575 struct smb_rq *rqp;
576 int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
577 int error, doff, poff, txdcount, txpcount, nmlen;
578
579 m = t2p->t2_tparam.mb_top;
580 if (m) {
581 md_initm(&mbparam, m); /* do not free it! */
582 totpcount = m_fixhdr(m);
583 if (totpcount > 0xffff) /* maxvalue for u_short */
584 return EINVAL;
585 } else
586 totpcount = 0;
587 m = t2p->t2_tdata.mb_top;
588 if (m) {
589 md_initm(&mbdata, m); /* do not free it! */
590 totdcount = m_fixhdr(m);
591 if (totdcount > 0xffff)
592 return EINVAL;
593 } else
594 totdcount = 0;
595 leftdcount = totdcount;
596 leftpcount = totpcount;
597 txmax = vcp->vc_txmax;
598 error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
599 SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
600 if (error)
601 return error;
602 rqp->sr_flags |= SMBR_MULTIPACKET;
603 t2p->t2_rq = rqp;
604 mbp = &rqp->sr_rq;
605 smb_rq_wstart(rqp);
606 mb_put_uint16le(mbp, totpcount);
607 mb_put_uint16le(mbp, totdcount);
608 mb_put_uint16le(mbp, t2p->t2_maxpcount);
609 mb_put_uint16le(mbp, t2p->t2_maxdcount);
610 mb_put_uint8(mbp, t2p->t2_maxscount);
611 mb_put_uint8(mbp, 0); /* reserved */
612 mb_put_uint16le(mbp, 0); /* flags */
613 mb_put_uint32le(mbp, 0); /* Timeout */
614 mb_put_uint16le(mbp, 0); /* reserved 2 */
615 len = mb_fixhdr(mbp);
616 /*
617 * now we have known packet size as
618 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + strlen(name) + 1),
619 * and need to decide which parts should go into the first request
620 */
621 nmlen = t2p->t_name ? strlen(t2p->t_name) : 0;
622 len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmlen + 1);
623 if (len + leftpcount > txmax) {
624 txpcount = min(leftpcount, txmax - len);
625 poff = len;
626 txdcount = 0;
627 doff = 0;
628 } else {
629 txpcount = leftpcount;
630 poff = txpcount ? len : 0;
631 len = ALIGN4(len + txpcount);
632 txdcount = min(leftdcount, txmax - len);
633 doff = txdcount ? len : 0;
634 }
635 leftpcount -= txpcount;
636 leftdcount -= txdcount;
637 mb_put_uint16le(mbp, txpcount);
638 mb_put_uint16le(mbp, poff);
639 mb_put_uint16le(mbp, txdcount);
640 mb_put_uint16le(mbp, doff);
641 mb_put_uint8(mbp, t2p->t2_setupcount);
642 mb_put_uint8(mbp, 0);
643 for (i = 0; i < t2p->t2_setupcount; i++)
644 mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
645 smb_rq_wend(rqp);
646 smb_rq_bstart(rqp);
647 /* TDUNICODE */
648 if (t2p->t_name)
649 mb_put_mem(mbp, t2p->t_name, nmlen, MB_MSYSTEM);
650 mb_put_uint8(mbp, 0); /* terminating zero */
651 len = mb_fixhdr(mbp);
652 if (txpcount) {
653 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
654 error = md_get_mbuf(&mbparam, txpcount, &m);
655 SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
656 if (error)
657 goto freerq;
658 mb_put_mbuf(mbp, m);
659 }
660 len = mb_fixhdr(mbp);
661 if (txdcount) {
662 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
663 error = md_get_mbuf(&mbdata, txdcount, &m);
664 if (error)
665 goto freerq;
666 mb_put_mbuf(mbp, m);
667 }
668 smb_rq_bend(rqp); /* incredible, but thats it... */
669 error = smb_rq_enqueue(rqp);
670 if (error)
671 goto freerq;
672 if (leftpcount == 0 && leftdcount == 0)
673 t2p->t2_flags |= SMBT2_ALLSENT;
674 error = smb_t2_reply(t2p);
675 if (error)
676 goto bad;
677 while (leftpcount || leftdcount) {
678 error = smb_rq_new(rqp, t2p->t_name ?
679 SMB_COM_TRANSACTION_SECONDARY : SMB_COM_TRANSACTION2_SECONDARY);
680 if (error)
681 goto bad;
682 mbp = &rqp->sr_rq;
683 smb_rq_wstart(rqp);
684 mb_put_uint16le(mbp, totpcount);
685 mb_put_uint16le(mbp, totdcount);
686 len = mb_fixhdr(mbp);
687 /*
688 * now we have known packet size as
689 * ALIGN4(len + 7 * 2 + 2) for T2 request, and -2 for T one,
690 * and need to decide which parts should go into request
691 */
692 len = ALIGN4(len + 6 * 2 + 2);
693 if (t2p->t_name == NULL)
694 len += 2;
695 if (len + leftpcount > txmax) {
696 txpcount = min(leftpcount, txmax - len);
697 poff = len;
698 txdcount = 0;
699 doff = 0;
700 } else {
701 txpcount = leftpcount;
702 poff = txpcount ? len : 0;
703 len = ALIGN4(len + txpcount);
704 txdcount = min(leftdcount, txmax - len);
705 doff = txdcount ? len : 0;
706 }
707 mb_put_uint16le(mbp, txpcount);
708 mb_put_uint16le(mbp, poff);
709 mb_put_uint16le(mbp, totpcount - leftpcount);
710 mb_put_uint16le(mbp, txdcount);
711 mb_put_uint16le(mbp, doff);
712 mb_put_uint16le(mbp, totdcount - leftdcount);
713 leftpcount -= txpcount;
714 leftdcount -= txdcount;
715 if (t2p->t_name == NULL)
716 mb_put_uint16le(mbp, t2p->t2_fid);
717 smb_rq_wend(rqp);
718 smb_rq_bstart(rqp);
719 mb_put_uint8(mbp, 0); /* name */
720 len = mb_fixhdr(mbp);
721 if (txpcount) {
722 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
723 error = md_get_mbuf(&mbparam, txpcount, &m);
724 if (error)
725 goto bad;
726 mb_put_mbuf(mbp, m);
727 }
728 len = mb_fixhdr(mbp);
729 if (txdcount) {
730 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
731 error = md_get_mbuf(&mbdata, txdcount, &m);
732 if (error)
733 goto bad;
734 mb_put_mbuf(mbp, m);
735 }
736 smb_rq_bend(rqp);
737 rqp->sr_state = SMBRQ_NOTSENT;
738 error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_NEWRQ, NULL);
739 if (error)
740 goto bad;
741 } /* while left params or data */
742 t2p->t2_flags |= SMBT2_ALLSENT;
743 mdp = &t2p->t2_rdata;
744 if (mdp->md_top) {
745 m_fixhdr(mdp->md_top);
746 md_initm(mdp, mdp->md_top);
747 }
748 mdp = &t2p->t2_rparam;
749 if (mdp->md_top) {
750 m_fixhdr(mdp->md_top);
751 md_initm(mdp, mdp->md_top);
752 }
753 bad:
754 smb_iod_removerq(rqp);
755 freerq:
756 smb_rq_done(rqp);
757 if (error) {
758 if (rqp->sr_flags & SMBR_RESTART)
759 t2p->t2_flags |= SMBT2_RESTART;
760 md_done(&t2p->t2_rparam);
761 md_done(&t2p->t2_rdata);
762 }
763 return error;
764 }
765
766 int
767 smb_t2_request(struct smb_t2rq *t2p)
768 {
769 int error = EINVAL, i;
770
771 for (i = 0; i < SMB_MAXRCN; i++) {
772 t2p->t2_flags &= ~SMBT2_RESTART;
773 error = smb_t2_request_int(t2p);
774 if (error == 0)
775 break;
776 if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) != SMBT2_RESTART)
777 break;
778 }
779 return error;
780 }
Cache object: 53795fcc2543e7da64c283c774e4ee1b
|