/* FreeBSD/Linux Kernel Cross Reference: sys/netsmb/smb_rq.c */
1 /*
2 * Copyright (c) 2000-2001, Boris Popov
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Boris Popov.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/endian.h>
39 #include <sys/kernel.h>
40 #include <sys/malloc.h>
41 #include <sys/proc.h>
42 #include <sys/lock.h>
43 #include <sys/sysctl.h>
44 #include <sys/socket.h>
45 #include <sys/socketvar.h>
46 #include <sys/mbuf.h>
47
48 #include <netsmb/smb.h>
49 #include <netsmb/smb_conn.h>
50 #include <netsmb/smb_rq.h>
51 #include <netsmb/smb_subr.h>
52 #include <netsmb/smb_tran.h>
53
54 MALLOC_DEFINE(M_SMBRQ, "SMBRQ", "SMB request");
55
56 MODULE_DEPEND(netsmb, libmchain, 1, 1, 1);
57
58 static int smb_rq_reply(struct smb_rq *rqp);
59 static int smb_rq_enqueue(struct smb_rq *rqp);
60 static int smb_rq_getenv(struct smb_connobj *layer,
61 struct smb_vc **vcpp, struct smb_share **sspp);
62 static int smb_rq_new(struct smb_rq *rqp, u_char cmd);
63 static int smb_t2_reply(struct smb_t2rq *t2p);
64
65 int
66 smb_rq_alloc(struct smb_connobj *layer, u_char cmd, struct smb_cred *scred,
67 struct smb_rq **rqpp)
68 {
69 struct smb_rq *rqp;
70 int error;
71
72 MALLOC(rqp, struct smb_rq *, sizeof(*rqp), M_SMBRQ, M_WAITOK);
73 if (rqp == NULL)
74 return ENOMEM;
75 error = smb_rq_init(rqp, layer, cmd, scred);
76 rqp->sr_flags |= SMBR_ALLOCED;
77 if (error) {
78 smb_rq_done(rqp);
79 return error;
80 }
81 *rqpp = rqp;
82 return 0;
83 }
84
85 static char tzero[12];
86
/*
 * Initialize a request structure: resolve the VC (and share, if any)
 * behind `layer', verify that `scred' has execute access on both,
 * assign a fresh multiplex ID and compose the SMB header for `cmd'.
 * Returns 0 or an errno value.
 */
int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *layer, u_char cmd,
	struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof(*rqp));
	smb_sl_init(&rqp->sr_slock, "srslock");
	/* Resolve which VC/share this request travels on. */
	error = smb_rq_getenv(layer, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return error;
	error = smb_vc_access(rqp->sr_vc, scred, SMBM_EXEC);
	if (error)
		return error;
	/* Share is optional: VC-level requests have none. */
	if (rqp->sr_share) {
		error = smb_share_access(rqp->sr_share, scred, SMBM_EXEC);
		if (error)
			return error;
	}
	rqp->sr_cred = scred;
	/* Each outstanding request on a VC gets a unique multiplex ID. */
	rqp->sr_mid = smb_vc_nextmid(rqp->sr_vc);
	return smb_rq_new(rqp, cmd);
}
110
/*
 * (Re)build the fixed SMB header for command `cmd' in rqp->sr_rq,
 * discarding any previously composed request and reply data.  Pointers
 * into the header (signature, TID, UID slots) are remembered so the
 * iod can patch in the final values at send time.
 */
static int
smb_rq_new(struct smb_rq *rqp, u_char cmd)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct mbchain *mbp = &rqp->sr_rq;
	int error;
	u_int16_t flags2;

	rqp->sr_sendcnt = 0;
	/* Drop any previously composed request/reply before rebuilding. */
	mb_done(mbp);
	md_done(&rqp->sr_rp);
	error = mb_init(mbp);
	if (error)
		return error;
	mb_put_mem(mbp, SMB_SIGNATURE, SMB_SIGLEN, MB_MSYSTEM);
	mb_put_uint8(mbp, cmd);
	mb_put_uint32le(mbp, 0);		/* DosError */
	mb_put_uint8(mbp, vcp->vc_hflags);
	flags2 = vcp->vc_hflags2;
	/* TRANSACTION requests are composed without the Unicode flag. */
	if (cmd == SMB_COM_TRANSACTION || cmd == SMB_COM_TRANSACTION_SECONDARY)
		flags2 &= ~SMB_FLAGS2_UNICODE;
	/* NEGOTIATE happens before signing keys exist. */
	if (cmd == SMB_COM_NEGOTIATE)
		flags2 &= ~SMB_FLAGS2_SECURITY_SIGNATURE;
	mb_put_uint16le(mbp, flags2);
	if ((flags2 & SMB_FLAGS2_SECURITY_SIGNATURE) == 0) {
		/* 12 reserved header bytes, all zero; no signature slot. */
		mb_put_mem(mbp, tzero, 12, MB_MSYSTEM);
		rqp->sr_rqsig = NULL;
	} else {
		/* 2 bytes PID-high, 8-byte signature slot, 2 bytes reserved. */
		mb_put_uint16le(mbp, 0 /*scred->sc_p->p_pid >> 16*/);
		rqp->sr_rqsig = (u_int8_t *)mb_reserve(mbp, 8);
		mb_put_uint16le(mbp, 0);
	}
	/* TID and UID are filled in later; reserve their slots. */
	rqp->sr_rqtid = (u_int16_t*)mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, 1 /*scred->sc_p->p_pid & 0xffff*/);
	rqp->sr_rquid = (u_int16_t*)mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, rqp->sr_mid);
	return 0;
}
149
150 void
151 smb_rq_done(struct smb_rq *rqp)
152 {
153 mb_done(&rqp->sr_rq);
154 md_done(&rqp->sr_rp);
155 smb_sl_destroy(&rqp->sr_slock);
156 if (rqp->sr_flags & SMBR_ALLOCED)
157 free(rqp, M_SMBRQ);
158 }
159
160 /*
161 * Simple request-reply exchange
162 */
163 int
164 smb_rq_simple(struct smb_rq *rqp)
165 {
166 struct smb_vc *vcp = rqp->sr_vc;
167 int error = EINVAL, i;
168
169 for (i = 0; i < SMB_MAXRCN; i++) {
170 rqp->sr_flags &= ~SMBR_RESTART;
171 rqp->sr_timo = vcp->vc_timo;
172 rqp->sr_state = SMBRQ_NOTSENT;
173 error = smb_rq_enqueue(rqp);
174 if (error)
175 return error;
176 error = smb_rq_reply(rqp);
177 if (error == 0)
178 break;
179 if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) != SMBR_RESTART)
180 break;
181 }
182 return error;
183 }
184
/*
 * Hand the request to the VC's iod thread.  VC-level requests and
 * requests issued with the iod's own credentials bypass share state
 * checks.  Share-level requests first wait out any in-progress
 * reconnect, and if the share is connected but no longer valid,
 * trigger a synchronous tree reconnect.  EXDEV from smb_iod_addrq()
 * indicates the share generation changed underfoot, so the whole
 * sequence is retried.
 */
static int
smb_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_share *ssp = rqp->sr_share;
	int error;

	if (ssp == NULL || rqp->sr_cred == &rqp->sr_vc->vc_iod->iod_scred) {
		return smb_iod_addrq(rqp);
	}
	for (;;) {
		SMBS_ST_LOCK(ssp);
		if (ssp->ss_flags & SMBS_RECONNECTING) {
			/* PDROP: msleep releases the lock; retaken on loop. */
			msleep(&ssp->ss_vcgenid, SMBS_ST_LOCKPTR(ssp),
			    PWAIT | PDROP, "90trcn", hz);
			if (smb_proc_intr(rqp->sr_cred->scr_p))
				return EINTR;
			continue;
		}
		if (smb_share_valid(ssp) || (ssp->ss_flags & SMBS_CONNECTED) == 0) {
			SMBS_ST_UNLOCK(ssp);
		} else {
			/* Connected but stale: reconnect the tree synchronously. */
			SMBS_ST_UNLOCK(ssp);
			error = smb_iod_request(rqp->sr_vc->vc_iod,
			    SMBIOD_EV_TREECONNECT | SMBIOD_EV_SYNC, ssp);
			if (error)
				return error;
		}
		error = smb_iod_addrq(rqp);
		if (error != EXDEV)
			break;
	}
	return error;
}
218
219 void
220 smb_rq_wstart(struct smb_rq *rqp)
221 {
222 rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof(u_int8_t));
223 rqp->sr_rq.mb_count = 0;
224 }
225
226 void
227 smb_rq_wend(struct smb_rq *rqp)
228 {
229 if (rqp->sr_wcount == NULL) {
230 SMBERROR("no wcount\n"); /* actually panic */
231 return;
232 }
233 if (rqp->sr_rq.mb_count & 1)
234 SMBERROR("odd word count\n");
235 *rqp->sr_wcount = rqp->sr_rq.mb_count / 2;
236 }
237
238 void
239 smb_rq_bstart(struct smb_rq *rqp)
240 {
241 rqp->sr_bcount = (u_short*)mb_reserve(&rqp->sr_rq, sizeof(u_short));
242 rqp->sr_rq.mb_count = 0;
243 }
244
245 void
246 smb_rq_bend(struct smb_rq *rqp)
247 {
248 int bcnt;
249
250 if (rqp->sr_bcount == NULL) {
251 SMBERROR("no bcount\n"); /* actually panic */
252 return;
253 }
254 bcnt = rqp->sr_rq.mb_count;
255 if (bcnt > 0xffff)
256 SMBERROR("byte count too large (%d)\n", bcnt);
257 *rqp->sr_bcount = htole16(bcnt);
258 }
259
260 int
261 smb_rq_intr(struct smb_rq *rqp)
262 {
263 struct proc *p = rqp->sr_cred->scr_p;
264
265 if (rqp->sr_flags & SMBR_INTR)
266 return EINTR;
267 return smb_proc_intr(p);
268 }
269
/*
 * Hand back the compose chain of the request so the caller can append
 * command-specific words and bytes.  Always succeeds.
 */
int
smb_rq_getrequest(struct smb_rq *rqp, struct mbchain **mbpp)
{
	*mbpp = &rqp->sr_rq;
	return 0;
}
276
/*
 * Hand back the reply chain of the request so the caller can parse
 * the response body.  Always succeeds.
 */
int
smb_rq_getreply(struct smb_rq *rqp, struct mdchain **mbpp)
{
	*mbpp = &rqp->sr_rp;
	return 0;
}
283
/*
 * Resolve the VC and (for share-level objects) the share behind a
 * connection object.  A NULL co_parent marks an object being torn
 * down ("zombie") and yields EINVAL.  Either output pointer may be
 * NULL; note the outputs are stored even on error.
 */
static int
smb_rq_getenv(struct smb_connobj *layer,
	struct smb_vc **vcpp, struct smb_share **sspp)
{
	struct smb_vc *vcp = NULL;
	struct smb_share *ssp = NULL;
	struct smb_connobj *cp;
	int error = 0;

	switch (layer->co_level) {
	case SMBL_VC:
		vcp = CPTOVC(layer);
		if (layer->co_parent == NULL) {
			SMBERROR("zombie VC %s\n", vcp->vc_srvname);
			error = EINVAL;
			break;
		}
		break;
	case SMBL_SHARE:
		ssp = CPTOSS(layer);
		cp = layer->co_parent;
		if (cp == NULL) {
			SMBERROR("zombie share %s\n", ssp->ss_name);
			error = EINVAL;
			break;
		}
		/* Recurse one level up to find the owning VC. */
		error = smb_rq_getenv(cp, &vcp, NULL);
		if (error)
			break;
		break;
	default:
		SMBERROR("invalid layer %d passed\n", layer->co_level);
		error = EINVAL;
	}
	if (vcpp)
		*vcpp = vcp;
	if (sspp)
		*sspp = ssp;
	return error;
}
324
325 /*
326 * Wait for reply on the request
327 */
328 static int
329 smb_rq_reply(struct smb_rq *rqp)
330 {
331 struct mdchain *mdp = &rqp->sr_rp;
332 u_int32_t tdw;
333 u_int8_t tb;
334 int error, rperror = 0;
335
336 error = smb_iod_waitrq(rqp);
337 if (error)
338 return error;
339 error = md_get_uint32(mdp, &tdw);
340 if (error)
341 return error;
342 error = md_get_uint8(mdp, &tb);
343 if (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_ERR_STATUS) {
344 error = md_get_uint32le(mdp, &rqp->sr_error);
345 } else {
346 error = md_get_uint8(mdp, &rqp->sr_errclass);
347 error = md_get_uint8(mdp, &tb);
348 error = md_get_uint16le(mdp, &rqp->sr_serror);
349 if (!error)
350 rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
351 }
352 error = md_get_uint8(mdp, &rqp->sr_rpflags);
353 error = md_get_uint16le(mdp, &rqp->sr_rpflags2);
354
355 error = md_get_uint32(mdp, &tdw);
356 error = md_get_uint32(mdp, &tdw);
357 error = md_get_uint32(mdp, &tdw);
358
359 error = md_get_uint16le(mdp, &rqp->sr_rptid);
360 error = md_get_uint16le(mdp, &rqp->sr_rppid);
361 error = md_get_uint16le(mdp, &rqp->sr_rpuid);
362 error = md_get_uint16le(mdp, &rqp->sr_rpmid);
363
364 if (error == 0 &&
365 (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_SECURITY_SIGNATURE))
366 error = smb_rq_verify(rqp);
367
368 SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n",
369 rqp->sr_rpmid, rqp->sr_rppid, rqp->sr_rpuid, rqp->sr_rptid,
370 rqp->sr_errclass, rqp->sr_serror);
371 return error ? error : rperror;
372 }
373
374
375 #define ALIGN4(a) (((a) + 3) & ~3)
376
377 /*
378 * TRANS2 request implementation
379 */
380 int
381 smb_t2_alloc(struct smb_connobj *layer, u_short setup, struct smb_cred *scred,
382 struct smb_t2rq **t2pp)
383 {
384 struct smb_t2rq *t2p;
385 int error;
386
387 MALLOC(t2p, struct smb_t2rq *, sizeof(*t2p), M_SMBRQ, M_WAITOK);
388 if (t2p == NULL)
389 return ENOMEM;
390 error = smb_t2_init(t2p, layer, setup, scred);
391 t2p->t2_flags |= SMBT2_ALLOCED;
392 if (error) {
393 smb_t2_done(t2p);
394 return error;
395 }
396 *t2pp = t2p;
397 return 0;
398 }
399
400 int
401 smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, u_short setup,
402 struct smb_cred *scred)
403 {
404 int error;
405
406 bzero(t2p, sizeof(*t2p));
407 t2p->t2_source = source;
408 t2p->t2_setupcount = 1;
409 t2p->t2_setupdata = t2p->t2_setup;
410 t2p->t2_setup[0] = setup;
411 t2p->t2_fid = 0xffff;
412 t2p->t2_cred = scred;
413 error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
414 if (error)
415 return error;
416 return 0;
417 }
418
419 void
420 smb_t2_done(struct smb_t2rq *t2p)
421 {
422 mb_done(&t2p->t2_tparam);
423 mb_done(&t2p->t2_tdata);
424 md_done(&t2p->t2_rparam);
425 md_done(&t2p->t2_rdata);
426 if (t2p->t2_flags & SMBT2_ALLOCED)
427 free(t2p, M_SMBRQ);
428 }
429
/*
 * Splice `count' bytes that start at byte `offset' of the reply
 * packet `mtop' into the accumulation chain `mdp' (the reply
 * parameter or data area).  The packet is split at `offset' and the
 * tail beyond `count' bytes is trimmed by shortening the last mbuf.
 *
 * NOTE(review): the trim assumes all excess bytes lie in the final
 * mbuf of the split chain — confirm for tails spanning several mbufs.
 */
static int
smb_t2_placedata(struct mbuf *mtop, u_int16_t offset, u_int16_t count,
	struct mdchain *mdp)
{
	struct mbuf *m, *m0;
	int len;

	m0 = m_split(mtop, offset, M_WAITOK);
	if (m0 == NULL)
		return EBADRPC;
	len = m_length(m0, &m);		/* m is the last mbuf of m0 */
	m->m_len -= len - count;
	if (mdp->md_top == NULL) {
		md_initm(mdp, m0);
	} else
		m_cat(mdp->md_top, m0);
	return 0;
}
448
/*
 * Collect the (possibly multi-packet) response to a transaction
 * request.  Each fragment advertises displacement/count pairs for its
 * parameter and data portions; fragments are spliced into
 * t2_rparam/t2_rdata until the advertised totals have arrived.
 * Out-of-order fragments are rejected with EINVAL.
 */
static int
smb_t2_reply(struct smb_t2rq *t2p)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = t2p->t2_rq;
	int error, totpgot, totdgot;
	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int16_t tmp, bc, dcount;
	u_int8_t wc;

	error = smb_rq_reply(rqp);
	if (error)
		return error;
	if ((t2p->t2_flags & SMBT2_ALLSENT) == 0) {
		/*
		 * this is an interim response, ignore it.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		return 0;
	}
	/*
	 * Now we have to get all subsequent responses. The CIFS specification
	 * says that they can be disordered which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		m_dumpm(mdp->md_top);
		if ((error = md_get_uint8(mdp, &wc)) != 0)
			break;
		/* Transaction replies carry at least 10 parameter words. */
		if (wc < 10) {
			error = ENOENT;
			break;
		}
		if ((error = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		/* Totals may shrink in later fragments; keep the minimum. */
		if (totpcount > tmp)
			totpcount = tmp;
		md_get_uint16le(mdp, &tmp);
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBERROR("Can't handle disordered parameters %d:%d\n",
			    pdisp, totpgot);
			error = EINVAL;
			break;
		}
		if ((error = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBERROR("Can't handle disordered data\n");
			error = EINVAL;
			break;
		}
		/* Skip the setup count, a reserved byte and the setup words. */
		md_get_uint8(mdp, &wc);
		md_get_uint8(mdp, NULL);
		tmp = wc;
		while (tmp--)
			md_get_uint16(mdp, NULL);
		if ((error = md_get_uint16le(mdp, &bc)) != 0)
			break;
		/* tmp = SMB_HDRLEN + 1 + 10 * 2 + 2 * wc + 2;*/
		if (dcount) {
			error = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &t2p->t2_rdata);
			if (error)
				break;
		}
		if (pcount) {
			error = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &t2p->t2_rparam);
			if (error)
				break;
		}
		totpgot += pcount;
		totdgot += dcount;
		if (totpgot >= totpcount && totdgot >= totdcount) {
			error = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		error = smb_rq_reply(rqp);
		if (error)
			break;
	}
	return error;
}
553
554 /*
555 * Perform a full round of TRANS2 request
556 */
557 static int
558 smb_t2_request_int(struct smb_t2rq *t2p)
559 {
560 struct smb_vc *vcp = t2p->t2_vc;
561 struct smb_cred *scred = t2p->t2_cred;
562 struct mbchain *mbp;
563 struct mdchain *mdp, mbparam, mbdata;
564 struct mbuf *m;
565 struct smb_rq *rqp;
566 int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
567 int error, doff, poff, txdcount, txpcount, nmlen;
568
569 m = t2p->t2_tparam.mb_top;
570 if (m) {
571 md_initm(&mbparam, m); /* do not free it! */
572 totpcount = m_fixhdr(m);
573 if (totpcount > 0xffff) /* maxvalue for u_short */
574 return EINVAL;
575 } else
576 totpcount = 0;
577 m = t2p->t2_tdata.mb_top;
578 if (m) {
579 md_initm(&mbdata, m); /* do not free it! */
580 totdcount = m_fixhdr(m);
581 if (totdcount > 0xffff)
582 return EINVAL;
583 } else
584 totdcount = 0;
585 leftdcount = totdcount;
586 leftpcount = totpcount;
587 txmax = vcp->vc_txmax;
588 error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
589 SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
590 if (error)
591 return error;
592 rqp->sr_flags |= SMBR_MULTIPACKET;
593 t2p->t2_rq = rqp;
594 rqp->sr_t2 = t2p;
595 mbp = &rqp->sr_rq;
596 smb_rq_wstart(rqp);
597 mb_put_uint16le(mbp, totpcount);
598 mb_put_uint16le(mbp, totdcount);
599 mb_put_uint16le(mbp, t2p->t2_maxpcount);
600 mb_put_uint16le(mbp, t2p->t2_maxdcount);
601 mb_put_uint8(mbp, t2p->t2_maxscount);
602 mb_put_uint8(mbp, 0); /* reserved */
603 mb_put_uint16le(mbp, 0); /* flags */
604 mb_put_uint32le(mbp, 0); /* Timeout */
605 mb_put_uint16le(mbp, 0); /* reserved 2 */
606 len = mb_fixhdr(mbp);
607 /*
608 * now we have known packet size as
609 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + strlen(name) + 1),
610 * and need to decide which parts should go into the first request
611 */
612 nmlen = t2p->t_name ? strlen(t2p->t_name) : 0;
613 len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmlen + 1);
614 if (len + leftpcount > txmax) {
615 txpcount = min(leftpcount, txmax - len);
616 poff = len;
617 txdcount = 0;
618 doff = 0;
619 } else {
620 txpcount = leftpcount;
621 poff = txpcount ? len : 0;
622 len = ALIGN4(len + txpcount);
623 txdcount = min(leftdcount, txmax - len);
624 doff = txdcount ? len : 0;
625 }
626 leftpcount -= txpcount;
627 leftdcount -= txdcount;
628 mb_put_uint16le(mbp, txpcount);
629 mb_put_uint16le(mbp, poff);
630 mb_put_uint16le(mbp, txdcount);
631 mb_put_uint16le(mbp, doff);
632 mb_put_uint8(mbp, t2p->t2_setupcount);
633 mb_put_uint8(mbp, 0);
634 for (i = 0; i < t2p->t2_setupcount; i++)
635 mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
636 smb_rq_wend(rqp);
637 smb_rq_bstart(rqp);
638 /* TDUNICODE */
639 if (t2p->t_name)
640 mb_put_mem(mbp, t2p->t_name, nmlen, MB_MSYSTEM);
641 mb_put_uint8(mbp, 0); /* terminating zero */
642 len = mb_fixhdr(mbp);
643 if (txpcount) {
644 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
645 error = md_get_mbuf(&mbparam, txpcount, &m);
646 SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
647 if (error)
648 goto freerq;
649 mb_put_mbuf(mbp, m);
650 }
651 len = mb_fixhdr(mbp);
652 if (txdcount) {
653 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
654 error = md_get_mbuf(&mbdata, txdcount, &m);
655 if (error)
656 goto freerq;
657 mb_put_mbuf(mbp, m);
658 }
659 smb_rq_bend(rqp); /* incredible, but thats it... */
660 error = smb_rq_enqueue(rqp);
661 if (error)
662 goto freerq;
663 if (leftpcount == 0 && leftdcount == 0)
664 t2p->t2_flags |= SMBT2_ALLSENT;
665 error = smb_t2_reply(t2p);
666 if (error)
667 goto bad;
668 while (leftpcount || leftdcount) {
669 t2p->t2_flags |= SMBT2_SECONDARY;
670 error = smb_rq_new(rqp, t2p->t_name ?
671 SMB_COM_TRANSACTION_SECONDARY : SMB_COM_TRANSACTION2_SECONDARY);
672 if (error)
673 goto bad;
674 mbp = &rqp->sr_rq;
675 smb_rq_wstart(rqp);
676 mb_put_uint16le(mbp, totpcount);
677 mb_put_uint16le(mbp, totdcount);
678 len = mb_fixhdr(mbp);
679 /*
680 * now we have known packet size as
681 * ALIGN4(len + 7 * 2 + 2) for T2 request, and -2 for T one,
682 * and need to decide which parts should go into request
683 */
684 len = ALIGN4(len + 6 * 2 + 2);
685 if (t2p->t_name == NULL)
686 len += 2;
687 if (len + leftpcount > txmax) {
688 txpcount = min(leftpcount, txmax - len);
689 poff = len;
690 txdcount = 0;
691 doff = 0;
692 } else {
693 txpcount = leftpcount;
694 poff = txpcount ? len : 0;
695 len = ALIGN4(len + txpcount);
696 txdcount = min(leftdcount, txmax - len);
697 doff = txdcount ? len : 0;
698 }
699 mb_put_uint16le(mbp, txpcount);
700 mb_put_uint16le(mbp, poff);
701 mb_put_uint16le(mbp, totpcount - leftpcount);
702 mb_put_uint16le(mbp, txdcount);
703 mb_put_uint16le(mbp, doff);
704 mb_put_uint16le(mbp, totdcount - leftdcount);
705 leftpcount -= txpcount;
706 leftdcount -= txdcount;
707 if (t2p->t_name == NULL)
708 mb_put_uint16le(mbp, t2p->t2_fid);
709 smb_rq_wend(rqp);
710 smb_rq_bstart(rqp);
711 mb_put_uint8(mbp, 0); /* name */
712 len = mb_fixhdr(mbp);
713 if (txpcount) {
714 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
715 error = md_get_mbuf(&mbparam, txpcount, &m);
716 if (error)
717 goto bad;
718 mb_put_mbuf(mbp, m);
719 }
720 len = mb_fixhdr(mbp);
721 if (txdcount) {
722 mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
723 error = md_get_mbuf(&mbdata, txdcount, &m);
724 if (error)
725 goto bad;
726 mb_put_mbuf(mbp, m);
727 }
728 smb_rq_bend(rqp);
729 rqp->sr_state = SMBRQ_NOTSENT;
730 error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_NEWRQ, NULL);
731 if (error)
732 goto bad;
733 } /* while left params or data */
734 t2p->t2_flags |= SMBT2_ALLSENT;
735 mdp = &t2p->t2_rdata;
736 if (mdp->md_top) {
737 m_fixhdr(mdp->md_top);
738 md_initm(mdp, mdp->md_top);
739 }
740 mdp = &t2p->t2_rparam;
741 if (mdp->md_top) {
742 m_fixhdr(mdp->md_top);
743 md_initm(mdp, mdp->md_top);
744 }
745 bad:
746 smb_iod_removerq(rqp);
747 freerq:
748 smb_rq_done(rqp);
749 if (error) {
750 if (rqp->sr_flags & SMBR_RESTART)
751 t2p->t2_flags |= SMBT2_RESTART;
752 md_done(&t2p->t2_rparam);
753 md_done(&t2p->t2_rdata);
754 }
755 return error;
756 }
757
758 int
759 smb_t2_request(struct smb_t2rq *t2p)
760 {
761 int error = EINVAL, i;
762
763 for (i = 0; i < SMB_MAXRCN; i++) {
764 t2p->t2_flags &= ~SMBR_RESTART;
765 error = smb_t2_request_int(t2p);
766 if (error == 0)
767 break;
768 if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) != SMBT2_RESTART)
769 break;
770 }
771 return error;
772 }
/* Cache object: f3e24a192aa4774220e413e7dbde8a1a */