/*	$NetBSD: smb_rq.c,v 1.34 2010/12/17 13:05:29 pooka Exp $	*/

/*
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * FreeBSD: src/sys/netsmb/smb_rq.c,v 1.4 2001/12/09 17:48:08 arr Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: smb_rq.c,v 1.34 2010/12/17 13:05:29 pooka Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/mbuf.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>


static int  smb_rq_init(struct smb_rq *, struct smb_connobj *, u_char,
		struct smb_cred *);
static int  smb_rq_getenv(struct smb_connobj *layer,
		struct smb_vc **vcpp, struct smb_share **sspp);
static int  smb_rq_new(struct smb_rq *rqp, u_char cmd);
static int  smb_t2_init(struct smb_t2rq *, struct smb_connobj *, u_short,
		struct smb_cred *);
static int  smb_t2_reply(struct smb_t2rq *t2p);

static struct pool smbrq_pool, smbt2rq_pool;

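/*
 * Set up / tear down the pools backing request structures; intended to
 * run once when the netsmb subsystem is initialized and destroyed.
 */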
void
smb_rqpool_init(void)
{

	pool_init(&smbrq_pool, sizeof(struct smb_rq), 0, 0, 0, "smbrqpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&smbt2rq_pool, sizeof(struct smb_t2rq), 0, 0, 0, "smbt2pl",
	    &pool_allocator_nointr, IPL_NONE);
}

void
smb_rqpool_fini(void)
{

	pool_destroy(&smbrq_pool);
	pool_destroy(&smbt2rq_pool);
}

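/*
 * Allocate a request from the pool and initialize it.  On error the
 * partially constructed request is released with smb_rq_done().
 *
 * Typical caller pattern (a sketch, not taken from this file; assumes
 * an attached share `ssp` and a credential `scred`, and that SSTOCP()
 * maps a share to its generic connection object as in smb_conn.h):
 *
 *	struct smb_rq *rqp;
 *	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_CHECK_DIRECTORY,
 *	    scred, &rqp);
 *	if (error)
 *		return error;
 *	... marshal words/bytes, call smb_rq_simple(rqp) ...
 *	smb_rq_done(rqp);
 */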
int
smb_rq_alloc(struct smb_connobj *layer, u_char cmd, struct smb_cred *scred,
	struct smb_rq **rqpp)
{
	struct smb_rq *rqp;
	int error;

	rqp = pool_get(&smbrq_pool, PR_WAITOK);
	error = smb_rq_init(rqp, layer, cmd, scred);
	rqp->sr_flags |= SMBR_ALLOCED;
	callout_init(&rqp->sr_timo_ch, 0);
	if (error) {
		smb_rq_done(rqp);
		return error;
	}
	*rqpp = rqp;
	return 0;
}

static int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *layer, u_char cmd,
	struct smb_cred *scred)
{
	int error;
	struct timeval timo;

	memset(rqp, 0, sizeof(*rqp));
	smb_sl_init(&rqp->sr_slock, "srslock");
	error = smb_rq_getenv(layer, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return error;
	error = smb_vc_access(rqp->sr_vc, scred, SMBM_EXEC);
	if (error)
		return error;
	if (rqp->sr_share) {
		error = smb_share_access(rqp->sr_share, scred, SMBM_EXEC);
		if (error)
			return error;
	}
	rqp->sr_cred = scred;
	rqp->sr_mid = smb_vc_nextmid(rqp->sr_vc);
	SMB_TRAN_GETPARAM(rqp->sr_vc, SMBTP_TIMEOUT, &timo);
	rqp->sr_timo = timo.tv_sec * hz;
	return smb_rq_new(rqp, cmd);
}

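/*
 * (Re)build the fixed SMB header for the given command.  Any data
 * already marshalled into the request or reply chains is discarded
 * first, so this is also used to construct secondary TRANSACTION
 * packets on an existing request.
 */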
static int
smb_rq_new(struct smb_rq *rqp, u_char cmd)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct mbchain *mbp = &rqp->sr_rq;
	int error;

	rqp->sr_sendcnt = 0;
	mb_done(mbp);
	md_done(&rqp->sr_rp);
	error = mb_init(mbp);
	if (error)
		return error;
	mb_put_mem(mbp, SMB_SIGNATURE, SMB_SIGLEN, MB_MSYSTEM);
	mb_put_uint8(mbp, cmd);
	mb_put_uint32le(mbp, 0);		/* DosError */
	mb_put_uint8(mbp, vcp->vc_hflags);
	mb_put_uint16le(mbp, vcp->vc_hflags2);
	mb_put_mem(mbp, NULL, 12, MB_MZERO);
	rqp->sr_rqtid = mb_reserve(mbp, sizeof(u_int16_t));
	/*
	 * The SMB packet PID is used by the server for lock validation.
	 * Beyond that, it is opaque to the server.
	 */
	mb_put_uint16le(mbp, 1 /*rqp->sr_cred->scr_p->p_pid & 0xffff*/);
	rqp->sr_rquid = mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, rqp->sr_mid);
	return 0;
}

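/*
 * Release everything a request holds.  The structure itself goes back
 * to the pool only when it came from smb_rq_alloc() (SMBR_ALLOCED).
 */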
void
smb_rq_done(struct smb_rq *rqp)
{
	mb_done(&rqp->sr_rq);
	md_done(&rqp->sr_rp);
	smb_sl_destroy(&rqp->sr_slock);
	if (rqp->sr_flags & SMBR_ALLOCED) {
		callout_destroy(&rqp->sr_timo_ch);
		pool_put(&smbrq_pool, rqp);
	}
}

/*
 * Simple request-reply exchange
 */
int
smb_rq_simple(struct smb_rq *rqp)
{
	int error, i;

	for (i = 0; i < SMB_MAXRCN; i++) {
		rqp->sr_flags &= ~SMBR_RESTART;
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_rq_enqueue(rqp);
		if (error)
			return error;
		error = smb_rq_reply(rqp);
		if (!error)
			break;
		if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) != SMBR_RESTART)
			break;
	}
	return error;
}

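/*
 * Hand a request to the iod thread.  If the share is in the middle of
 * a reconnect, wait for that to settle; if the tree connection is
 * stale, ask the iod for a synchronous tree connect first.  An EXDEV
 * return from smb_iod_addrq() is treated as transient and causes
 * another pass around the loop.
 */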
int
smb_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_share *ssp = rqp->sr_share;
	int error;

	if (ssp == NULL || rqp->sr_cred == &rqp->sr_vc->vc_iod->iod_scred) {
		return smb_iod_addrq(rqp);
	}
	for (;;) {
		SMBS_ST_LOCK(ssp);
		if (ssp->ss_flags & SMBS_RECONNECTING) {
			/*
			 * mtsleep() expects the interlock to be held and
			 * releases it itself (PNORELOCK), so do not
			 * unlock it here.
			 */
			error = mtsleep(&ssp->ss_vcgenid,
				PWAIT | PCATCH | PNORELOCK,
				"smbtrcn", hz, SMBS_ST_LOCKPTR(ssp));
			if (error && error != EWOULDBLOCK)
				return (error);
			continue;
		}
		if (smb_share_valid(ssp) || (ssp->ss_flags & SMBS_CONNECTED) == 0) {
			SMBS_ST_UNLOCK(ssp);
		} else {
			SMBS_ST_UNLOCK(ssp);
			error = smb_iod_request(rqp->sr_vc->vc_iod,
			    SMBIOD_EV_TREECONNECT | SMBIOD_EV_SYNC, ssp);
			if (error)
				return error;
		}
		error = smb_iod_addrq(rqp);
		if (error != EXDEV)
			break;
	}
	return error;
}

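/*
 * Word- and byte-count bracketing helpers.  smb_rq_wstart() reserves
 * the word-count byte and resets the running length; smb_rq_wend()
 * converts the bytes marshalled in between into a word count and
 * stores it.  The bcount pair does the same for the trailing byte
 * block.  A request body is built as:
 *
 *	smb_rq_wstart(rqp);
 *	...mb_put_*() parameter words...
 *	smb_rq_wend(rqp);
 *	smb_rq_bstart(rqp);
 *	...mb_put_*() data bytes...
 *	smb_rq_bend(rqp);
 */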
void
smb_rq_wstart(struct smb_rq *rqp)
{
	rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof(u_int8_t));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_wend(struct smb_rq *rqp)
{
#ifdef DIAGNOSTIC
	if (rqp->sr_wcount == NULL)
		panic("smb_rq_wend: no wcount");
	if (rqp->sr_rq.mb_count & 1)
		panic("smb_rq_wend: odd word count");
#endif
	rqp->sr_wcount[0] = rqp->sr_rq.mb_count / 2;
}

void
smb_rq_bstart(struct smb_rq *rqp)
{
	rqp->sr_bcount = mb_reserve(&rqp->sr_rq, sizeof(u_int16_t));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_bend(struct smb_rq *rqp)
{
	u_int16_t bcnt = rqp->sr_rq.mb_count;

#ifdef DIAGNOSTIC
	if (rqp->sr_bcount == NULL)
		panic("smb_rq_bend: no bcount");
	if (rqp->sr_rq.mb_count > 0xffff)
		panic("smb_rq_bend: byte count too large (%d)",
		    rqp->sr_rq.mb_count);
#endif
	SMBRQ_PUTLE16(rqp->sr_bcount, bcnt);
}

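/*
 * Has this request been interrupted?  Either the iod flagged it with
 * SMBR_INTR, or smb_proc_intr() reports a pending signal on the
 * issuing lwp.
 */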
int
smb_rq_intr(struct smb_rq *rqp)
{
	struct lwp *l = rqp->sr_cred->scr_l;

	if (rqp->sr_flags & SMBR_INTR)
		return EINTR;
	return smb_proc_intr(l);
}

int
smb_rq_getrequest(struct smb_rq *rqp, struct mbchain **mbpp)
{
	*mbpp = &rqp->sr_rq;
	return 0;
}

int
smb_rq_getreply(struct smb_rq *rqp, struct mdchain **mbpp)
{
	*mbpp = &rqp->sr_rp;
	return 0;
}

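/*
 * Walk up from a connection object to find its VC, and the share if
 * the object is one.  Objects whose parent has been torn away are
 * rejected as zombies.
 */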
static int
smb_rq_getenv(struct smb_connobj *layer,
	struct smb_vc **vcpp, struct smb_share **sspp)
{
	struct smb_vc *vcp = NULL;
	struct smb_share *ssp = NULL;
	struct smb_connobj *cp;
	int error = 0;

	switch (layer->co_level) {
	    case SMBL_VC:
		vcp = CPTOVC(layer);
		if (layer->co_parent == NULL) {
			SMBERROR(("zombie VC %s\n", vcp->vc_srvname));
			error = EINVAL;
			break;
		}
		break;
	    case SMBL_SHARE:
		ssp = CPTOSS(layer);
		cp = layer->co_parent;
		if (cp == NULL) {
			SMBERROR(("zombie share %s\n", ssp->ss_name));
			error = EINVAL;
			break;
		}
		error = smb_rq_getenv(cp, &vcp, NULL);
		if (error)
			break;
		break;
	    default:
		SMBERROR(("invalid layer %d passed\n", layer->co_level));
		error = EINVAL;
	}
	if (vcpp)
		*vcpp = vcp;
	if (sspp)
		*sspp = ssp;
	return error;
}

/*
 * Wait for the reply to a request and parse the fixed response header
 */
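/*
 * Error decoding depends on SMB_FLAGS2_ERR_STATUS: when set, the
 * server returns a 32-bit NT status word (currently skipped, see
 * below); otherwise a DOS error class/code pair, which is mapped to
 * an errno with smb_maperror().
 */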
int
smb_rq_reply(struct smb_rq *rqp)
{
	struct mdchain *mdp = &rqp->sr_rp;
	int error;
	u_int8_t errclass = 0;	/* only set on the DOS error path below */
	u_int16_t serror = 0;

	error = smb_iod_waitrq(rqp);
	if (error)
		return error;
	error = md_get_uint32(mdp, NULL);
	if (error)
		return error;
	(void) md_get_uint8(mdp, NULL);
	if (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_ERR_STATUS) {
		(void) md_get_uint32(mdp, NULL);	/* XXX ignored? */
	} else {
		(void) md_get_uint8(mdp, &errclass);
		(void) md_get_uint8(mdp, NULL);
		error = md_get_uint16le(mdp, &serror);
		if (!error)
			error = smb_maperror(errclass, serror);
	}
	(void) md_get_uint8(mdp, NULL);		/* rpflags */
	(void) md_get_uint16(mdp, NULL);	/* rpflags2 */

	(void) md_get_uint32(mdp, NULL);
	(void) md_get_uint32(mdp, NULL);
	(void) md_get_uint32(mdp, NULL);

	(void) md_get_uint16le(mdp, &rqp->sr_rptid);
	(void) md_get_uint16le(mdp, &rqp->sr_rppid);
	(void) md_get_uint16le(mdp, &rqp->sr_rpuid);
	(void) md_get_uint16le(mdp, &rqp->sr_rpmid);

	SMBSDEBUG(("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n",
	    rqp->sr_rpmid, rqp->sr_rppid, rqp->sr_rpuid, rqp->sr_rptid,
	    errclass, serror));
	return (error);
}

382 
383 void
smb_rq_setcallback(struct smb_rq * rqp,void (* recvcallb)(void *),void * arg)384 smb_rq_setcallback(struct smb_rq *rqp, void (*recvcallb)(void *), void *arg)
385 {
386 	SMBRQ_SLOCK(rqp);
387 	rqp->sr_recvcallback = recvcallb;
388 	rqp->sr_recvarg = arg;
389 	SMBRQ_SUNLOCK(rqp);
390 }
391 
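/*
 * Round a length up to the next multiple of four; parameter and data
 * sections within a transaction packet are 4-byte aligned.
 */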
#define ALIGN4(a)	(((a) + 3) & ~3)

/*
 * TRANS2 request implementation
 */
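/*
 * A minimal usage sketch (hypothetical call sequence, not taken from
 * this file; `ssp` and `scred` come from the caller, and SSTOCP() maps
 * a share to its generic connection object):
 *
 *	struct smb_t2rq *t2p;
 *	error = smb_t2_alloc(SSTOCP(ssp), SMB_TRANS2_QUERY_FS_INFORMATION,
 *	    scred, &t2p);
 *	if (error == 0) {
 *		... fill t2_tparam/t2_tdata, set t2_maxpcount/t2_maxdcount ...
 *		error = smb_t2_request(t2p);
 *		smb_t2_done(t2p);
 *	}
 */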
int
smb_t2_alloc(struct smb_connobj *layer, u_short setup, struct smb_cred *scred,
	struct smb_t2rq **t2pp)
{
	struct smb_t2rq *t2p;
	int error;

	t2p = pool_get(&smbt2rq_pool, PR_WAITOK);
	error = smb_t2_init(t2p, layer, setup, scred);
	t2p->t2_flags |= SMBT2_ALLOCED;
	if (error) {
		smb_t2_done(t2p);
		return error;
	}
	*t2pp = t2p;
	return 0;
}

static int
smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, u_short setup,
	struct smb_cred *scred)
{
	int error;

	memset(t2p, 0, sizeof(*t2p));
	t2p->t2_source = source;
	t2p->t2_setupcount = 1;
	t2p->t2_setupdata = t2p->t2_setup;
	t2p->t2_setup[0] = setup;
	t2p->t2_fid = 0xffff;
	t2p->t2_cred = scred;
	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
	if (error)
		return error;
	return 0;
}

void
smb_t2_done(struct smb_t2rq *t2p)
{
	mb_done(&t2p->t2_tparam);
	mb_done(&t2p->t2_tdata);
	md_done(&t2p->t2_rparam);
	md_done(&t2p->t2_rdata);
	if (t2p->t2_flags & SMBT2_ALLOCED)
		pool_put(&smbt2rq_pool, t2p);
}

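/*
 * Split the section starting at 'offset' out of the reply mbuf chain,
 * trim it to 'count' bytes, and append it to the given mdchain, which
 * accumulates parameter or data bytes across multiple responses.
 */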
static int
smb_t2_placedata(struct mbuf *mtop, u_int16_t offset, u_int16_t count,
	struct mdchain *mdp)
{
	struct mbuf *m, *m0;
	int len;

	m0 = m_split(mtop, offset, M_WAIT);
	if (m0 == NULL)
		return EBADRPC;
	for(len = 0, m = m0; m->m_next; m = m->m_next)
		len += m->m_len;
	len += m->m_len;
	m->m_len -= len - count;
	if (mdp->md_top == NULL) {
		md_initm(mdp, m0);
	} else
		m_cat(mdp->md_top, m0);
	return 0;
}

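/*
 * Collect every response belonging to the transaction.  The first
 * response may be an interim one (arriving before SMBT2_ALLSENT is
 * set); real responses carry displacement fields, which must match
 * the running totals since out-of-order sections are not handled.
 * The loop ends once the advertised parameter and data totals have
 * been received.
 */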
465 
466 static int
smb_t2_reply(struct smb_t2rq * t2p)467 smb_t2_reply(struct smb_t2rq *t2p)
468 {
469 	struct mdchain *mdp;
470 	struct smb_rq *rqp = t2p->t2_rq;
471 	int error, totpgot, totdgot;
472 	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
473 	u_int16_t tmp, bc, dcount;
474 	u_int8_t wc;
475 
476 	error = smb_rq_reply(rqp);
477 	if (error)
478 		return error;
479 	if ((t2p->t2_flags & SMBT2_ALLSENT) == 0) {
480 		/*
481 		 * this is an interim response, ignore it.
482 		 */
483 		SMBRQ_SLOCK(rqp);
484 		md_next_record(&rqp->sr_rp);
485 		SMBRQ_SUNLOCK(rqp);
486 		return 0;
487 	}
	/*
	 * Now collect all subsequent responses. The CIFS specification
	 * allows them to arrive out of order, which is odd.
	 * TODO: timeout (timo) handling
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		m_dumpm(mdp->md_top);
		if ((error = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 10) {
			error = ENOENT;
			break;
		}
		if ((error = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		md_get_uint16le(mdp, &tmp);
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBERROR(("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot));
			error = EINVAL;
			break;
		}
		if ((error = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBERROR(("Can't handle misordered data\n"));
			error = EINVAL;
			break;
		}
		md_get_uint8(mdp, &wc);
		md_get_uint8(mdp, NULL);
		tmp = wc;
		while (tmp--)
			md_get_uint16(mdp, NULL);
		if ((error = md_get_uint16le(mdp, &bc)) != 0)
			break;
/*		tmp = SMB_HDRLEN + 1 + 10 * 2 + 2 * wc + 2;*/
		if (dcount) {
			error = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &t2p->t2_rdata);
			if (error)
				break;
		}
		if (pcount) {
			error = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &t2p->t2_rparam);
			if (error)
				break;
		}
		totpgot += pcount;
		totdgot += dcount;
		if (totpgot >= totpcount && totdgot >= totdcount) {
			error = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		error = smb_rq_reply(rqp);
		if (error)
			break;
	}
	return error;
}

/*
 * Perform a full round trip of a TRANS2 request
 */
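/*
 * The parameter and data chains are carved into pieces small enough
 * for vc_txmax-sized packets: one primary TRANSACTION/TRANSACTION2
 * request, then as many SECONDARY requests as needed, each carrying
 * the offsets and displacements of the sections it transports.
 */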
static int
smb_t2_request_int(struct smb_t2rq *t2p)
{
	struct smb_vc *vcp = t2p->t2_vc;
	struct smb_cred *scred = t2p->t2_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbparam, mbdata;
	struct mbuf *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
	int error, doff, poff, txdcount, txpcount, nmlen;

	m = t2p->t2_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0xffff)		/* maxvalue for u_short */
			return EINVAL;
	} else
		totpcount = 0;
	m = t2p->t2_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0xffff)
			return EINVAL;
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
	    SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
	if (error)
		return error;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	t2p->t2_rq = rqp;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, totpcount);
	mb_put_uint16le(mbp, totdcount);
	mb_put_uint16le(mbp, t2p->t2_maxpcount);
	mb_put_uint16le(mbp, t2p->t2_maxdcount);
	mb_put_uint8(mbp, t2p->t2_maxscount);
	mb_put_uint8(mbp, 0);			/* reserved */
	mb_put_uint16le(mbp, 0);		/* flags */
	mb_put_uint32le(mbp, 0);		/* Timeout */
	mb_put_uint16le(mbp, 0);		/* reserved 2 */
	len = mb_fixhdr(mbp);
	/*
	 * The packet size is now known:
	 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + strlen(name) + 1).
	 * Decide which parts go into the first request.
	 */
	nmlen = t2p->t_name ? strlen(t2p->t_name) : 0;
	len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmlen + 1);
	if (len + leftpcount > txmax) {
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint16le(mbp, txpcount);
	mb_put_uint16le(mbp, poff);
	mb_put_uint16le(mbp, txdcount);
	mb_put_uint16le(mbp, doff);
	mb_put_uint8(mbp, t2p->t2_setupcount);
	mb_put_uint8(mbp, 0);
	for (i = 0; i < t2p->t2_setupcount; i++)
		mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	/* TDUNICODE */
	if (t2p->t_name)
		mb_put_mem(mbp, t2p->t_name, nmlen, MB_MSYSTEM);
	mb_put_uint8(mbp, 0);	/* terminating zero */
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG(("%d:%d:%d\n", error, txpcount, txmax));
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but that's it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount == 0 && leftdcount == 0)
		t2p->t2_flags |= SMBT2_ALLSENT;
	error = smb_t2_reply(t2p);
	if (error)
		goto bad;
	while (leftpcount || leftdcount) {
		error = smb_rq_new(rqp, t2p->t_name ?
		    SMB_COM_TRANSACTION_SECONDARY : SMB_COM_TRANSACTION2_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_uint16le(mbp, totpcount);
		mb_put_uint16le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * The packet size is now known: ALIGN4(len + 7 * 2 + 2)
		 * for a T2 request, two bytes less for a T one.  Decide
		 * which parts go into this request.
		 */
		len = ALIGN4(len + 6 * 2 + 2);
		if (t2p->t_name == NULL)
			len += 2;
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint16le(mbp, txpcount);
		mb_put_uint16le(mbp, poff);
		mb_put_uint16le(mbp, totpcount - leftpcount);
		mb_put_uint16le(mbp, txdcount);
		mb_put_uint16le(mbp, doff);
		mb_put_uint16le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		if (t2p->t_name == NULL)
			mb_put_uint16le(mbp, t2p->t2_fid);
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		mb_put_uint8(mbp, 0);	/* name */
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_NEWRQ, NULL);
		if (error)
			goto bad;
	}	/* while left params or data */
	t2p->t2_flags |= SMBT2_ALLSENT;
	mdp = &t2p->t2_rdata;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
	mdp = &t2p->t2_rparam;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error) {
		/* examine sr_flags before smb_rq_done() can free rqp */
		if (rqp->sr_flags & SMBR_RESTART)
			t2p->t2_flags |= SMBT2_RESTART;
		md_done(&t2p->t2_rparam);
		md_done(&t2p->t2_rdata);
	}
	smb_rq_done(rqp);
	return error;
}

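/*
 * Issue a transaction, retrying up to SMB_MAXRCN times while the
 * lower layers mark it for restart (e.g. after a reconnect).
 */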
int
smb_t2_request(struct smb_t2rq *t2p)
{
	int error = EINVAL, i;

	for (i = 0; i < SMB_MAXRCN; i++) {
		t2p->t2_flags &= ~SMBT2_RESTART;
		error = smb_t2_request_int(t2p);
		if (error == 0)
			break;
		if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) != SMBT2_RESTART)
			break;
	}
	return error;
}