1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * Jeremy Allison (jra@samba.org) 2006.
7 *
8 */
9
10 #include <linux/fs.h>
11 #include <linux/list.h>
12 #include <linux/gfp.h>
13 #include <linux/wait.h>
14 #include <linux/net.h>
15 #include <linux/delay.h>
16 #include <linux/freezer.h>
17 #include <linux/tcp.h>
18 #include <linux/bvec.h>
19 #include <linux/highmem.h>
20 #include <linux/uaccess.h>
21 #include <linux/processor.h>
22 #include <linux/mempool.h>
23 #include <linux/sched/signal.h>
24 #include <linux/task_io_accounting_ops.h>
25 #include "cifspdu.h"
26 #include "cifsglob.h"
27 #include "cifsproto.h"
28 #include "cifs_debug.h"
29 #include "smb2proto.h"
30 #include "smbdirect.h"
31 #include "compress.h"
32
33 /* Max number of iovectors we can use off the stack when sending requests. */
34 #define CIFS_MAX_IOV_SIZE 8
35
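/*
 * Default mid callback for synchronous requests: mark the response as ready
 * (if one has been received) and wake the task that issued the request,
 * which alloc_mid() stored in callback_data.
 */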
36 void
cifs_wake_up_task(struct mid_q_entry *mid)
38 {
39 if (mid->mid_state == MID_RESPONSE_RECEIVED)
40 mid->mid_state = MID_RESPONSE_READY;
41 wake_up_process(mid->callback_data);
42 }
43
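/*
 * Allocate a mid from the mempool and initialise it for a synchronous
 * request on @server: the default callback simply wakes the allocating
 * task (see cifs_wake_up_task above).
 */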
44 static struct mid_q_entry *
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
46 {
47 struct mid_q_entry *temp;
48
49 if (server == NULL) {
50 cifs_dbg(VFS, "%s: null TCP session\n", __func__);
51 return NULL;
52 }
53
54 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
55 memset(temp, 0, sizeof(struct mid_q_entry));
56 kref_init(&temp->refcount);
57 temp->mid = get_mid(smb_buffer);
58 temp->pid = current->pid;
59 temp->command = cpu_to_le16(smb_buffer->Command);
60 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/*
	 * Easier to use jiffies; note that the time a mid is allocated can be
	 * well before the time it is actually sent.
	 */
63 temp->when_alloc = jiffies;
64 temp->server = server;
65
66 /*
67 * The default is for the mid to be synchronous, so the
68 * default callback just wakes up the current task.
69 */
70 get_task_struct(current);
71 temp->creator = current;
72 temp->callback = cifs_wake_up_task;
73 temp->callback_data = current;
74
75 atomic_inc(&mid_count);
76 temp->mid_state = MID_REQUEST_ALLOCATED;
77 return temp;
78 }
79
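/*
 * Final kref release for a mid: if the waiter gave up but a response still
 * arrived, let the handle_cancelled_mid hook clean up after it; then free
 * the response buffer, update the CONFIG_CIFS_STATS2 per-command timing
 * counters (logging suspiciously slow responses), and return the entry to
 * the mempool.
 */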
void __release_mid(struct kref *refcount)
81 {
82 struct mid_q_entry *midEntry =
83 container_of(refcount, struct mid_q_entry, refcount);
84 #ifdef CONFIG_CIFS_STATS2
85 __le16 command = midEntry->server->vals->lock_cmd;
86 __u16 smb_cmd = le16_to_cpu(midEntry->command);
87 unsigned long now;
88 unsigned long roundtrip_time;
89 #endif
90 struct TCP_Server_Info *server = midEntry->server;
91
92 if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
93 (midEntry->mid_state == MID_RESPONSE_RECEIVED ||
94 midEntry->mid_state == MID_RESPONSE_READY) &&
95 server->ops->handle_cancelled_mid)
96 server->ops->handle_cancelled_mid(midEntry, server);
97
98 midEntry->mid_state = MID_FREE;
99 atomic_dec(&mid_count);
100 if (midEntry->large_buf)
101 cifs_buf_release(midEntry->resp_buf);
102 else
103 cifs_small_buf_release(midEntry->resp_buf);
104 #ifdef CONFIG_CIFS_STATS2
105 now = jiffies;
106 if (now < midEntry->when_alloc)
107 cifs_server_dbg(VFS, "Invalid mid allocation time\n");
108 roundtrip_time = now - midEntry->when_alloc;
109
110 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
111 if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
112 server->slowest_cmd[smb_cmd] = roundtrip_time;
113 server->fastest_cmd[smb_cmd] = roundtrip_time;
114 } else {
115 if (server->slowest_cmd[smb_cmd] < roundtrip_time)
116 server->slowest_cmd[smb_cmd] = roundtrip_time;
117 else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
118 server->fastest_cmd[smb_cmd] = roundtrip_time;
119 }
120 cifs_stats_inc(&server->num_cmds[smb_cmd]);
121 server->time_per_cmd[smb_cmd] += roundtrip_time;
122 }
123 /*
	 * Commands taking longer than one second (the default threshold) can
	 * be an indication that something is wrong, unless it is quite a slow
	 * link or a very busy server. Note that this calculation is unlikely
	 * or impossible to wrap as long as slow_rsp_threshold is not set far
	 * above the recommended maximum (32767, i.e. 9 hours), and it is
	 * generally harmless even if wrong since it only affects debug
	 * counters - so leave the calculation as a simple comparison rather
	 * than doing multiple conversions and overflow checks.
132 */
133 if ((slow_rsp_threshold != 0) &&
134 time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
135 (midEntry->command != command)) {
136 /*
137 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
138 * NB: le16_to_cpu returns unsigned so can not be negative below
139 */
140 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
141 cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
142
143 trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
144 midEntry->when_sent, midEntry->when_received);
145 if (cifsFYI & CIFS_TIMER) {
146 pr_debug("slow rsp: cmd %d mid %llu",
147 midEntry->command, midEntry->mid);
148 cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
149 now - midEntry->when_alloc,
150 now - midEntry->when_sent,
151 now - midEntry->when_received);
152 }
153 }
154 #endif
155 put_task_struct(midEntry->creator);
156
157 mempool_free(midEntry, cifs_mid_poolp);
158 }
159
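/*
 * Unlink the mid from the pending queue (if not already unlinked) and drop
 * the reference that the queue held.
 */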
160 void
delete_mid(struct mid_q_entry *mid)
162 {
163 spin_lock(&mid->server->mid_lock);
164 if (!(mid->mid_flags & MID_DELETED)) {
165 list_del_init(&mid->qhead);
166 mid->mid_flags |= MID_DELETED;
167 }
168 spin_unlock(&mid->server->mid_lock);
169
170 release_mid(mid);
171 }
172
173 /*
174 * smb_send_kvec - send an array of kvecs to the server
175 * @server: Server to send the data to
176 * @smb_msg: Message to send
177 * @sent: amount of data sent on socket is stored here
178 *
179 * Our basic "send data to server" function. Should be called with srv_mutex
180 * held. The caller is responsible for handling the results.
181 */
182 static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
185 {
186 int rc = 0;
187 int retries = 0;
188 struct socket *ssocket = server->ssocket;
189
190 *sent = 0;
191
192 if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
194 else
195 smb_msg->msg_flags = MSG_NOSIGNAL;
196
197 while (msg_data_left(smb_msg)) {
198 /*
199 * If blocking send, we try 3 times, since each can block
200 * for 5 seconds. For nonblocking we have to try more
201 * but wait increasing amounts of time allowing time for
202 * socket to clear. The overall time we wait in either
203 * case to send on the socket is about 15 seconds.
204 * Similarly we wait for 15 seconds for a response from
205 * the server in SendReceive[2] for the server to send
206 * a response back for most types of requests (except
207 * SMB Write past end of file which can be slow, and
208 * blocking lock operations). NFS waits slightly longer
209 * than CIFS, but this can make it take longer for
210 * nonresponsive servers to be detected and 15 seconds
211 * is more than enough time for modern networks to
212 * send a packet. In most cases if we fail to send
213 * after the retries we will kill the socket and
214 * reconnect which may clear the network problem.
215 */
216 rc = sock_sendmsg(ssocket, smb_msg);
217 if (rc == -EAGAIN) {
218 retries++;
219 if (retries >= 14 ||
220 (!server->noblocksnd && (retries > 2))) {
221 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
222 ssocket);
223 return -EAGAIN;
224 }
225 msleep(1 << retries);
226 continue;
227 }
228
229 if (rc < 0)
230 return rc;
231
232 if (rc == 0) {
233 /* should never happen, letting socket clear before
234 retrying is our only obvious option here */
235 cifs_server_dbg(VFS, "tcp sent no data\n");
236 msleep(500);
237 continue;
238 }
239
240 /* send was at least partially successful */
241 *sent += rc;
242 retries = 0; /* in case we get ENOSPC on the next send */
243 }
244 return 0;
245 }
246
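/*
 * Length in bytes that @rqst will occupy on the wire, not counting the
 * RFC1002 length field for SMB2+ (that 4-byte marker is sent separately
 * by __smb_send_rqst).
 */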
247 unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
249 {
250 unsigned int i;
251 struct kvec *iov;
252 int nvec;
253 unsigned long buflen = 0;
254
255 if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
256 rqst->rq_iov[0].iov_len == 4) {
257 iov = &rqst->rq_iov[1];
258 nvec = rqst->rq_nvec - 1;
259 } else {
260 iov = rqst->rq_iov;
261 nvec = rqst->rq_nvec;
262 }
263
264 /* total up iov array first */
265 for (i = 0; i < nvec; i++)
266 buflen += iov[i].iov_len;
267
268 buflen += iov_iter_count(&rqst->rq_iter);
269 return buflen;
270 }
271
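/*
 * Push one or more requests onto the socket (or hand them to smbdirect).
 * For SMB2+ an RFC1002 length marker covering the whole compound is sent
 * first. Signals are blocked for the duration of the send so a partial send
 * does not force an unnecessary reconnect; if a partial send happens anyway,
 * the connection is torn down so the server discards the fragment. Like
 * smb_send_kvec() above, this should be called with the srv_mutex held.
 */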
272 static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
275 {
276 int rc;
277 struct kvec *iov;
278 int n_vec;
279 unsigned int send_length = 0;
280 unsigned int i, j;
281 sigset_t mask, oldmask;
282 size_t total_len = 0, sent, size;
283 struct socket *ssocket = server->ssocket;
284 struct msghdr smb_msg = {};
285 __be32 rfc1002_marker;
286
287 cifs_in_send_inc(server);
288 if (cifs_rdma_enabled(server)) {
289 /* return -EAGAIN when connecting or reconnecting */
290 rc = -EAGAIN;
291 if (server->smbd_conn)
292 rc = smbd_send(server, num_rqst, rqst);
293 goto smbd_done;
294 }
295
296 rc = -EAGAIN;
297 if (ssocket == NULL)
298 goto out;
299
300 rc = -ERESTARTSYS;
301 if (fatal_signal_pending(current)) {
302 cifs_dbg(FYI, "signal pending before send request\n");
303 goto out;
304 }
305
306 rc = 0;
307 /* cork the socket */
308 tcp_sock_set_cork(ssocket->sk, true);
309
310 for (j = 0; j < num_rqst; j++)
311 send_length += smb_rqst_len(server, &rqst[j]);
312 rfc1002_marker = cpu_to_be32(send_length);
313
314 /*
315 * We should not allow signals to interrupt the network send because
316 * any partial send will cause session reconnects thus increasing
317 * latency of system calls and overload a server with unnecessary
318 * requests.
319 */
320
321 sigfillset(&mask);
322 sigprocmask(SIG_BLOCK, &mask, &oldmask);
323
324 /* Generate a rfc1002 marker for SMB2+ */
325 if (!is_smb1(server)) {
326 struct kvec hiov = {
327 .iov_base = &rfc1002_marker,
328 .iov_len = 4
329 };
330 iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
331 rc = smb_send_kvec(server, &smb_msg, &sent);
332 if (rc < 0)
333 goto unmask;
334
335 total_len += sent;
336 send_length += 4;
337 }
338
339 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
340
341 for (j = 0; j < num_rqst; j++) {
342 iov = rqst[j].rq_iov;
343 n_vec = rqst[j].rq_nvec;
344
345 size = 0;
346 for (i = 0; i < n_vec; i++) {
347 dump_smb(iov[i].iov_base, iov[i].iov_len);
348 size += iov[i].iov_len;
349 }
350
351 iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);
352
353 rc = smb_send_kvec(server, &smb_msg, &sent);
354 if (rc < 0)
355 goto unmask;
356
357 total_len += sent;
358
359 if (iov_iter_count(&rqst[j].rq_iter) > 0) {
360 smb_msg.msg_iter = rqst[j].rq_iter;
361 rc = smb_send_kvec(server, &smb_msg, &sent);
362 if (rc < 0)
363 break;
364 total_len += sent;
365 }
366
367 }
368
369 unmask:
370 sigprocmask(SIG_SETMASK, &oldmask, NULL);
371
372 /*
373 * If signal is pending but we have already sent the whole packet to
374 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue, thus allowing
	 * the client to handle responses from the server.
377 *
378 * If only part of the packet has been sent there is no need to hide
379 * interrupt because the session will be reconnected anyway, so there
380 * won't be any response from the server to handle.
381 */
382
383 if (signal_pending(current) && (total_len != send_length)) {
384 cifs_dbg(FYI, "signal is pending after attempt to send\n");
385 rc = -ERESTARTSYS;
386 }
387
388 /* uncork it */
389 tcp_sock_set_cork(ssocket->sk, false);
390
391 if ((total_len > 0) && (total_len != send_length)) {
392 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
393 send_length, total_len);
394 /*
395 * If we have only sent part of an SMB then the next SMB could
396 * be taken as the remainder of this one. We need to kill the
397 * socket so the server throws away the partial SMB
398 */
399 cifs_signal_cifsd_for_reconnect(server, false);
400 trace_smb3_partial_send_reconnect(server->CurrentMid,
401 server->conn_id, server->hostname);
402 }
403 smbd_done:
404 /*
405 * there's hardly any use for the layers above to know the
406 * actual error code here. All they should do at this point is
407 * to retry the connection and hope it goes away.
408 */
409 if (rc < 0 && rc != -EINTR && rc != -EAGAIN) {
410 cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
411 rc);
412 rc = -ECONNABORTED;
413 cifs_signal_cifsd_for_reconnect(server, false);
414 } else if (rc > 0)
415 rc = 0;
416 out:
417 cifs_in_send_dec(server);
418 return rc;
419 }
420
421 struct send_req_vars {
422 struct smb2_transform_hdr tr_hdr;
423 struct smb_rqst rqst[MAX_COMPOUND];
424 struct kvec iov;
425 };
426
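/*
 * Front end for __smb_send_rqst(): compressed requests are handed to
 * smb_compress(), unencrypted requests go straight out, and encrypted
 * (CIFS_TRANSFORM_REQ) compounds are first wrapped in a transform header
 * via the server's init_transform_rq hook.
 */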
427 static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
430 {
431 struct send_req_vars *vars;
432 struct smb_rqst *cur_rqst;
433 struct kvec *iov;
434 int rc;
435
436 if (flags & CIFS_COMPRESS_REQ)
437 return smb_compress(server, &rqst[0], __smb_send_rqst);
438
439 if (!(flags & CIFS_TRANSFORM_REQ))
440 return __smb_send_rqst(server, num_rqst, rqst);
441
442 if (WARN_ON_ONCE(num_rqst > MAX_COMPOUND - 1))
443 return -EIO;
444
445 if (!server->ops->init_transform_rq) {
446 cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
447 return -EIO;
448 }
449
450 vars = kzalloc(sizeof(*vars), GFP_NOFS);
451 if (!vars)
452 return -ENOMEM;
453 cur_rqst = vars->rqst;
454 iov = &vars->iov;
455
456 iov->iov_base = &vars->tr_hdr;
457 iov->iov_len = sizeof(vars->tr_hdr);
458 cur_rqst[0].rq_iov = iov;
459 cur_rqst[0].rq_nvec = 1;
460
461 rc = server->ops->init_transform_rq(server, num_rqst + 1,
462 &cur_rqst[0], rqst);
463 if (rc)
464 goto out;
465
466 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
467 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
468 out:
469 kfree(vars);
470 return rc;
471 }
472
473 int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
476 {
477 struct kvec iov[2];
478 struct smb_rqst rqst = { .rq_iov = iov,
479 .rq_nvec = 2 };
480
481 iov[0].iov_base = smb_buffer;
482 iov[0].iov_len = 4;
483 iov[1].iov_base = (char *)smb_buffer + 4;
484 iov[1].iov_len = smb_buf_length;
485
486 return __smb_send_rqst(server, 1, &rqst);
487 }
488
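/*
 * Wait until @num_credits credits are available for this operation type,
 * giving up after @timeout ms (a negative timeout means wait forever).
 * Non-blocking ops such as oplock break acks take a credit immediately, and
 * once enough requests are in flight the last MAX_COMPOUND credits are kept
 * in reserve for compound requests so single-credit traffic cannot starve
 * them. On success the credits are charged against the connection and
 * *instance records the reconnect generation they were taken from.
 */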
489 static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
493 {
494 long rc;
495 int *credits;
496 int optype;
497 long int t;
498 int scredits, in_flight;
499
500 if (timeout < 0)
501 t = MAX_JIFFY_OFFSET;
502 else
503 t = msecs_to_jiffies(timeout);
504
505 optype = flags & CIFS_OP_MASK;
506
507 *instance = 0;
508
509 credits = server->ops->get_credits_field(server, optype);
510 /* Since an echo is already inflight, no need to wait to send another */
511 if (*credits <= 0 && optype == CIFS_ECHO_OP)
512 return -EAGAIN;
513
514 spin_lock(&server->req_lock);
515 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
516 /* oplock breaks must not be held up */
517 server->in_flight++;
518 if (server->in_flight > server->max_in_flight)
519 server->max_in_flight = server->in_flight;
520 *credits -= 1;
521 *instance = server->reconnect_instance;
522 scredits = *credits;
523 in_flight = server->in_flight;
524 spin_unlock(&server->req_lock);
525
526 trace_smb3_nblk_credits(server->CurrentMid,
527 server->conn_id, server->hostname, scredits, -1, in_flight);
528 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
529 __func__, 1, scredits);
530
531 return 0;
532 }
533
534 while (1) {
535 spin_unlock(&server->req_lock);
536
537 spin_lock(&server->srv_lock);
538 if (server->tcpStatus == CifsExiting) {
539 spin_unlock(&server->srv_lock);
540 return -ENOENT;
541 }
542 spin_unlock(&server->srv_lock);
543
544 spin_lock(&server->req_lock);
545 if (*credits < num_credits) {
546 scredits = *credits;
547 spin_unlock(&server->req_lock);
548
549 cifs_num_waiters_inc(server);
550 rc = wait_event_killable_timeout(server->request_q,
551 has_credits(server, credits, num_credits), t);
552 cifs_num_waiters_dec(server);
553 if (!rc) {
554 spin_lock(&server->req_lock);
555 scredits = *credits;
556 in_flight = server->in_flight;
557 spin_unlock(&server->req_lock);
558
559 trace_smb3_credit_timeout(server->CurrentMid,
560 server->conn_id, server->hostname, scredits,
561 num_credits, in_flight);
562 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
563 timeout);
564 return -EBUSY;
565 }
566 if (rc == -ERESTARTSYS)
567 return -ERESTARTSYS;
568 spin_lock(&server->req_lock);
569 } else {
570 /*
571 * For normal commands, reserve the last MAX_COMPOUND
572 * credits to compound requests.
573 * Otherwise these compounds could be permanently
574 * starved for credits by single-credit requests.
575 *
576 * To prevent spinning CPU, block this thread until
577 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
579 * credits in flight to avoid triggering this check
580 * for servers that are slow to hand out credits on
581 * new sessions.
582 */
583 if (!optype && num_credits == 1 &&
584 server->in_flight > 2 * MAX_COMPOUND &&
585 *credits <= MAX_COMPOUND) {
586 spin_unlock(&server->req_lock);
587
588 cifs_num_waiters_inc(server);
589 rc = wait_event_killable_timeout(
590 server->request_q,
591 has_credits(server, credits,
592 MAX_COMPOUND + 1),
593 t);
594 cifs_num_waiters_dec(server);
595 if (!rc) {
596 spin_lock(&server->req_lock);
597 scredits = *credits;
598 in_flight = server->in_flight;
599 spin_unlock(&server->req_lock);
600
601 trace_smb3_credit_timeout(
602 server->CurrentMid,
603 server->conn_id, server->hostname,
604 scredits, num_credits, in_flight);
605 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
606 timeout);
607 return -EBUSY;
608 }
609 if (rc == -ERESTARTSYS)
610 return -ERESTARTSYS;
611 spin_lock(&server->req_lock);
612 continue;
613 }
614
615 /*
616 * Can not count locking commands against total
617 * as they are allowed to block on server.
618 */
619
620 /* update # of requests on the wire to server */
621 if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
622 *credits -= num_credits;
623 server->in_flight += num_credits;
624 if (server->in_flight > server->max_in_flight)
625 server->max_in_flight = server->in_flight;
626 *instance = server->reconnect_instance;
627 }
628 scredits = *credits;
629 in_flight = server->in_flight;
630 spin_unlock(&server->req_lock);
631
632 trace_smb3_waitff_credits(server->CurrentMid,
633 server->conn_id, server->hostname, scredits,
634 -(num_credits), in_flight);
635 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
636 __func__, num_credits, scredits);
637 break;
638 }
639 }
640 return 0;
641 }
642
643 static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
646 {
647 return wait_for_free_credits(server, 1, -1, flags,
648 instance);
649 }
650
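/*
 * Like wait_for_free_request() but for a compound needing @num credits.
 * If there are not enough credits and nothing is in flight, fail with
 * -EDEADLK right away: no outstanding response could ever replenish the
 * credit pool, so waiting would hang.
 */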
651 static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
654 {
655 int *credits;
656 int scredits, in_flight;
657
658 credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
659
660 spin_lock(&server->req_lock);
661 scredits = *credits;
662 in_flight = server->in_flight;
663
664 if (*credits < num) {
665 /*
666 * If the server is tight on resources or just gives us less
667 * credits for other reasons (e.g. requests are coming out of
668 * order and the server delays granting more credits until it
669 * processes a missing mid) and we exhausted most available
670 * credits there may be situations when we try to send
671 * a compound request but we don't have enough credits. At this
672 * point the client needs to decide if it should wait for
673 * additional credits or fail the request. If at least one
674 * request is in flight there is a high probability that the
675 * server will return enough credits to satisfy this compound
676 * request.
677 *
678 * Return immediately if no requests in flight since we will be
679 * stuck on waiting for credits.
680 */
681 if (server->in_flight == 0) {
682 spin_unlock(&server->req_lock);
683 trace_smb3_insufficient_credits(server->CurrentMid,
684 server->conn_id, server->hostname, scredits,
685 num, in_flight);
686 cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
687 __func__, in_flight, num, scredits);
688 return -EDEADLK;
689 }
690 }
691 spin_unlock(&server->req_lock);
692
693 return wait_for_free_credits(server, num, 60000, flags,
694 instance);
695 }
696
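/*
 * Used by dialects without multi-credit (large MTU) support: grant the full
 * requested size without consuming any credits.
 */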
697 int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, size_t size,
		      size_t *num, struct cifs_credits *credits)
700 {
701 *num = size;
702 credits->value = 0;
703 credits->instance = server->reconnect_instance;
704 return 0;
705 }
706
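/*
 * Allocate a mid for @in_buf and queue it on the server's pending_mid_q,
 * refusing new requests while the session is still being set up or torn
 * down (except for the session-setup/negotiate and logoff commands that
 * drive those transitions).
 */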
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
709 {
710 spin_lock(&ses->ses_lock);
711 if (ses->ses_status == SES_NEW) {
712 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
713 (in_buf->Command != SMB_COM_NEGOTIATE)) {
714 spin_unlock(&ses->ses_lock);
715 return -EAGAIN;
716 }
717 /* else ok - we are setting up session */
718 }
719
720 if (ses->ses_status == SES_EXITING) {
721 /* check if SMB session is bad because we are setting it up */
722 if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
723 spin_unlock(&ses->ses_lock);
724 return -EAGAIN;
725 }
726 /* else ok - we are shutting down session */
727 }
728 spin_unlock(&ses->ses_lock);
729
730 *ppmidQ = alloc_mid(in_buf, ses->server);
731 if (*ppmidQ == NULL)
732 return -ENOMEM;
733 spin_lock(&ses->server->mid_lock);
734 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
735 spin_unlock(&ses->server->mid_lock);
736 return 0;
737 }
738
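/*
 * Sleep (killable and freezable) until the mid leaves the "submitted" /
 * "response received" states, i.e. until the demultiplex thread has fully
 * processed a response or given up on the mid.
 */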
739 static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
741 {
742 int error;
743
744 error = wait_event_state(server->response_q,
745 midQ->mid_state != MID_REQUEST_SUBMITTED &&
746 midQ->mid_state != MID_RESPONSE_RECEIVED,
747 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
748 if (error < 0)
749 return -ERESTARTSYS;
750
751 return 0;
752 }
753
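/*
 * Build a mid for an async SMB1 request: expects iov[0] to be the 4-byte
 * RFC1002 length immediately followed by the header in iov[1], turns on
 * signing in the header flags when required, and signs the request.
 */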
754 struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
756 {
757 int rc;
758 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
759 struct mid_q_entry *mid;
760
761 if (rqst->rq_iov[0].iov_len != 4 ||
762 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
763 return ERR_PTR(-EIO);
764
765 /* enable signing if server requires it */
766 if (server->sign)
767 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
768
769 mid = alloc_mid(hdr, server);
770 if (mid == NULL)
771 return ERR_PTR(-ENOMEM);
772
773 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
774 if (rc) {
775 release_mid(mid);
776 return ERR_PTR(rc);
777 }
778
779 return mid;
780 }
781
782 /*
783 * Send a SMB request and set the callback function in the mid to handle
784 * the result. Caller is responsible for dealing with timeouts.
785 */
786 int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
791 {
792 int rc;
793 struct mid_q_entry *mid;
794 struct cifs_credits credits = { .value = 0, .instance = 0 };
795 unsigned int instance;
796 int optype;
797
798 optype = flags & CIFS_OP_MASK;
799
800 if ((flags & CIFS_HAS_CREDITS) == 0) {
801 rc = wait_for_free_request(server, flags, &instance);
802 if (rc)
803 return rc;
804 credits.value = 1;
805 credits.instance = instance;
806 } else
807 instance = exist_credits->instance;
808
809 cifs_server_lock(server);
810
811 /*
812 * We can't use credits obtained from the previous session to send this
813 * request. Check if there were reconnects after we obtained credits and
814 * return -EAGAIN in such cases to let callers handle it.
815 */
816 if (instance != server->reconnect_instance) {
817 cifs_server_unlock(server);
818 add_credits_and_wake_if(server, &credits, optype);
819 return -EAGAIN;
820 }
821
822 mid = server->ops->setup_async_request(server, rqst);
823 if (IS_ERR(mid)) {
824 cifs_server_unlock(server);
825 add_credits_and_wake_if(server, &credits, optype);
826 return PTR_ERR(mid);
827 }
828
829 mid->receive = receive;
830 mid->callback = callback;
831 mid->callback_data = cbdata;
832 mid->handle = handle;
833 mid->mid_state = MID_REQUEST_SUBMITTED;
834
835 /* put it on the pending_mid_q */
836 spin_lock(&server->mid_lock);
837 list_add_tail(&mid->qhead, &server->pending_mid_q);
838 spin_unlock(&server->mid_lock);
839
840 /*
841 * Need to store the time in mid before calling I/O. For call_async,
842 * I/O response may come back and free the mid entry on another thread.
843 */
844 cifs_save_when_sent(mid);
845 rc = smb_send_rqst(server, 1, rqst, flags);
846
847 if (rc < 0) {
848 revert_current_mid(server, mid->credits);
849 server->sequence_number -= 2;
850 delete_mid(mid);
851 }
852
853 cifs_server_unlock(server);
854
855 if (rc == 0)
856 return 0;
857
858 add_credits_and_wake_if(server, &credits, optype);
859 return rc;
860 }
861
862 /*
863 *
864 * Send an SMB Request. No response info (other than return code)
865 * needs to be parsed.
866 *
867 * flags indicate the type of request buffer and how long to wait
868 * and whether to log NT STATUS code (error) before mapping it to POSIX error
869 *
870 */
871 int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
874 {
875 int rc;
876 struct kvec iov[1];
877 struct kvec rsp_iov;
878 int resp_buf_type;
879
880 iov[0].iov_base = in_buf;
881 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
882 flags |= CIFS_NO_RSP_BUF;
883 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
884 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
885
886 return rc;
887 }
888
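/*
 * Convert the final mid state into an errno for a synchronous caller.
 * On anything other than a successfully received response the mid is
 * released here (and dequeued if the state is unexpected); on success the
 * caller remains responsible for it.
 */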
889 static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
891 {
892 int rc = 0;
893
894 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
895 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
896
897 spin_lock(&server->mid_lock);
898 switch (mid->mid_state) {
899 case MID_RESPONSE_READY:
900 spin_unlock(&server->mid_lock);
901 return rc;
902 case MID_RETRY_NEEDED:
903 rc = -EAGAIN;
904 break;
905 case MID_RESPONSE_MALFORMED:
906 rc = -EIO;
907 break;
908 case MID_SHUTDOWN:
909 rc = -EHOSTDOWN;
910 break;
911 default:
912 if (!(mid->mid_flags & MID_DELETED)) {
913 list_del_init(&mid->qhead);
914 mid->mid_flags |= MID_DELETED;
915 }
916 spin_unlock(&server->mid_lock);
917 cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
918 __func__, mid->mid, mid->mid_state);
919 rc = -EIO;
920 goto sync_mid_done;
921 }
922 spin_unlock(&server->mid_lock);
923
924 sync_mid_done:
925 release_mid(mid);
926 return rc;
927 }
928
929 static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
932 {
933 return server->ops->send_cancel ?
934 server->ops->send_cancel(server, rqst, mid) : 0;
935 }
936
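/*
 * Post-receive checks for an SMB1 response: verify the signature when
 * signing is in use, then map the status code in the header to a POSIX
 * error (optionally logging it first).
 */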
937 int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
940 {
941 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
942
943 dump_smb(mid->resp_buf, min_t(u32, 92, len));
944
945 /* convert the length into a more usable form */
946 if (server->sign) {
947 struct kvec iov[2];
948 int rc = 0;
949 struct smb_rqst rqst = { .rq_iov = iov,
950 .rq_nvec = 2 };
951
952 iov[0].iov_base = mid->resp_buf;
953 iov[0].iov_len = 4;
954 iov[1].iov_base = (char *)mid->resp_buf + 4;
955 iov[1].iov_len = len - 4;
956 /* FIXME: add code to kill session */
957 rc = cifs_verify_signature(&rqst, server,
958 mid->sequence_number);
959 if (rc)
960 cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
961 rc);
962 }
963
964 /* BB special case reconnect tid and uid here? */
965 return map_and_check_smb_error(mid, log_error);
966 }
967
968 struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
971 {
972 int rc;
973 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
974 struct mid_q_entry *mid;
975
976 if (rqst->rq_iov[0].iov_len != 4 ||
977 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
978 return ERR_PTR(-EIO);
979
980 rc = allocate_mid(ses, hdr, &mid);
981 if (rc)
982 return ERR_PTR(rc);
983 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
984 if (rc) {
985 delete_mid(mid);
986 return ERR_PTR(rc);
987 }
988 return mid;
989 }
990
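/*
 * Per-PDU callback for compound chains: return the credits granted by this
 * response to the connection's pool and mark the response as ready. Only
 * the callback of the last PDU in the chain (below) wakes the waiter.
 */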
991 static void
cifs_compound_callback(struct mid_q_entry *mid)
993 {
994 struct TCP_Server_Info *server = mid->server;
995 struct cifs_credits credits = {
996 .value = server->ops->get_credits(mid),
997 .instance = server->reconnect_instance,
998 };
999
1000 add_credits(server, &credits, mid->optype);
1001
1002 if (mid->mid_state == MID_RESPONSE_RECEIVED)
1003 mid->mid_state = MID_RESPONSE_READY;
1004 }
1005
1006 static void
cifs_compound_last_callback(struct mid_q_entry *mid)
1008 {
1009 cifs_compound_callback(mid);
1010 cifs_wake_up_task(mid);
1011 }
1012
1013 static void
cifs_cancelled_callback(struct mid_q_entry *mid)
1015 {
1016 cifs_compound_callback(mid);
1017 release_mid(mid);
1018 }
1019
1020 /*
1021 * Return a channel (master if none) of @ses that can be used to send
1022 * regular requests.
1023 *
1024 * If we are currently binding a new channel (negprot/sess.setup),
1025 * return the new incomplete channel.
1026 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1028 {
1029 uint index = 0;
1030 unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
1031 struct TCP_Server_Info *server = NULL;
1032 int i;
1033
1034 if (!ses)
1035 return NULL;
1036
1037 spin_lock(&ses->chan_lock);
1038 for (i = 0; i < ses->chan_count; i++) {
1039 server = ses->chans[i].server;
1040 if (!server || server->terminate)
1041 continue;
1042
1043 if (CIFS_CHAN_NEEDS_RECONNECT(ses, i))
1044 continue;
1045
1046 /*
1047 * strictly speaking, we should pick up req_lock to read
1048 * server->in_flight. But it shouldn't matter much here if we
1049 * race while reading this data. The worst that can happen is
1050 * that we could use a channel that's not least loaded. Avoiding
1051 * taking the lock could help reduce wait time, which is
1052 * important for this function
1053 */
1054 if (server->in_flight < min_in_flight) {
1055 min_in_flight = server->in_flight;
1056 index = i;
1057 }
1058 if (server->in_flight > max_in_flight)
1059 max_in_flight = server->in_flight;
1060 }
1061
1062 /* if all channels are equally loaded, fall back to round-robin */
1063 if (min_in_flight == max_in_flight) {
1064 index = (uint)atomic_inc_return(&ses->chan_seq);
1065 index %= ses->chan_count;
1066 }
1067
1068 server = ses->chans[index].server;
1069 spin_unlock(&ses->chan_lock);
1070
1071 return server;
1072 }
1073
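/*
 * Send @num_rqst requests as one compound chain on @server and wait for all
 * of the responses. Credits are taken up front (one per request), the whole
 * chain is signed and sent under the server mutex so PDUs cannot interleave,
 * and any wait that is interrupted turns into a cancel: such mids are handed
 * to cifs_cancelled_callback so the demultiplex thread can release them when
 * the late response finally arrives.
 */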
1074 int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
1079 {
1080 int i, j, optype, rc = 0;
1081 struct mid_q_entry *midQ[MAX_COMPOUND];
1082 bool cancelled_mid[MAX_COMPOUND] = {false};
1083 struct cifs_credits credits[MAX_COMPOUND] = {
1084 { .value = 0, .instance = 0 }
1085 };
1086 unsigned int instance;
1087 char *buf;
1088
1089 optype = flags & CIFS_OP_MASK;
1090
1091 for (i = 0; i < num_rqst; i++)
1092 resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */
1093
1094 if (!ses || !ses->server || !server) {
1095 cifs_dbg(VFS, "Null session\n");
1096 return -EIO;
1097 }
1098
1099 spin_lock(&server->srv_lock);
1100 if (server->tcpStatus == CifsExiting) {
1101 spin_unlock(&server->srv_lock);
1102 return -ENOENT;
1103 }
1104 spin_unlock(&server->srv_lock);
1105
1106 /*
1107 * Wait for all the requests to become available.
1108 * This approach still leaves the possibility to be stuck waiting for
1109 * credits if the server doesn't grant credits to the outstanding
1110 * requests and if the client is completely idle, not generating any
1111 * other requests.
1112 * This can be handled by the eventual session reconnect.
1113 */
1114 rc = wait_for_compound_request(server, num_rqst, flags,
1115 &instance);
1116 if (rc)
1117 return rc;
1118
1119 for (i = 0; i < num_rqst; i++) {
1120 credits[i].value = 1;
1121 credits[i].instance = instance;
1122 }
1123
1124 /*
1125 * Make sure that we sign in the same order that we send on this socket
1126 * and avoid races inside tcp sendmsg code that could cause corruption
1127 * of smb data.
1128 */
1129
1130 cifs_server_lock(server);
1131
1132 /*
	 * All the parts of the compound chain must have obtained credits from the
1134 * same session. We can not use credits obtained from the previous
1135 * session to send this request. Check if there were reconnects after
1136 * we obtained credits and return -EAGAIN in such cases to let callers
1137 * handle it.
1138 */
1139 if (instance != server->reconnect_instance) {
1140 cifs_server_unlock(server);
1141 for (j = 0; j < num_rqst; j++)
1142 add_credits(server, &credits[j], optype);
1143 return -EAGAIN;
1144 }
1145
1146 for (i = 0; i < num_rqst; i++) {
1147 midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
1148 if (IS_ERR(midQ[i])) {
1149 revert_current_mid(server, i);
1150 for (j = 0; j < i; j++)
1151 delete_mid(midQ[j]);
1152 cifs_server_unlock(server);
1153
1154 /* Update # of requests on wire to server */
1155 for (j = 0; j < num_rqst; j++)
1156 add_credits(server, &credits[j], optype);
1157 return PTR_ERR(midQ[i]);
1158 }
1159
1160 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
1161 midQ[i]->optype = optype;
1162 /*
1163 * Invoke callback for every part of the compound chain
1164 * to calculate credits properly. Wake up this thread only when
1165 * the last element is received.
1166 */
1167 if (i < num_rqst - 1)
1168 midQ[i]->callback = cifs_compound_callback;
1169 else
1170 midQ[i]->callback = cifs_compound_last_callback;
1171 }
1172 rc = smb_send_rqst(server, num_rqst, rqst, flags);
1173
1174 for (i = 0; i < num_rqst; i++)
1175 cifs_save_when_sent(midQ[i]);
1176
1177 if (rc < 0) {
1178 revert_current_mid(server, num_rqst);
1179 server->sequence_number -= 2;
1180 }
1181
1182 cifs_server_unlock(server);
1183
1184 /*
1185 * If sending failed for some reason or it is an oplock break that we
1186 * will not receive a response to - return credits back
1187 */
1188 if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
1189 for (i = 0; i < num_rqst; i++)
1190 add_credits(server, &credits[i], optype);
1191 goto out;
1192 }
1193
1194 /*
1195 * At this point the request is passed to the network stack - we assume
1196 * that any credits taken from the server structure on the client have
1197 * been spent and we can't return them back. Once we receive responses
1198 * we will collect credits granted by the server in the mid callbacks
1199 * and add those credits to the server structure.
1200 */
1201
1202 /*
1203 * Compounding is never used during session establish.
1204 */
1205 spin_lock(&ses->ses_lock);
1206 if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
1207 spin_unlock(&ses->ses_lock);
1208
1209 cifs_server_lock(server);
1210 smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
1211 cifs_server_unlock(server);
1212
1213 spin_lock(&ses->ses_lock);
1214 }
1215 spin_unlock(&ses->ses_lock);
1216
1217 for (i = 0; i < num_rqst; i++) {
1218 rc = wait_for_response(server, midQ[i]);
1219 if (rc != 0)
1220 break;
1221 }
1222 if (rc != 0) {
1223 for (; i < num_rqst; i++) {
1224 cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
1225 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
1226 send_cancel(server, &rqst[i], midQ[i]);
1227 spin_lock(&server->mid_lock);
1228 midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
1229 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
1230 midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
1231 midQ[i]->callback = cifs_cancelled_callback;
1232 cancelled_mid[i] = true;
1233 credits[i].value = 0;
1234 }
1235 spin_unlock(&server->mid_lock);
1236 }
1237 }
1238
1239 for (i = 0; i < num_rqst; i++) {
1240 if (rc < 0)
1241 goto out;
1242
1243 rc = cifs_sync_mid_result(midQ[i], server);
1244 if (rc != 0) {
1245 /* mark this mid as cancelled to not free it below */
1246 cancelled_mid[i] = true;
1247 goto out;
1248 }
1249
1250 if (!midQ[i]->resp_buf ||
1251 midQ[i]->mid_state != MID_RESPONSE_READY) {
1252 rc = -EIO;
1253 cifs_dbg(FYI, "Bad MID state?\n");
1254 goto out;
1255 }
1256
1257 buf = (char *)midQ[i]->resp_buf;
1258 resp_iov[i].iov_base = buf;
1259 resp_iov[i].iov_len = midQ[i]->resp_buf_size +
1260 HEADER_PREAMBLE_SIZE(server);
1261
1262 if (midQ[i]->large_buf)
1263 resp_buf_type[i] = CIFS_LARGE_BUFFER;
1264 else
1265 resp_buf_type[i] = CIFS_SMALL_BUFFER;
1266
1267 rc = server->ops->check_receive(midQ[i], server,
1268 flags & CIFS_LOG_ERROR);
1269
1270 /* mark it so buf will not be freed by delete_mid */
1271 if ((flags & CIFS_NO_RSP_BUF) == 0)
1272 midQ[i]->resp_buf = NULL;
1273
1274 }
1275
1276 /*
1277 * Compounding is never used during session establish.
1278 */
1279 spin_lock(&ses->ses_lock);
1280 if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
1281 struct kvec iov = {
1282 .iov_base = resp_iov[0].iov_base,
1283 .iov_len = resp_iov[0].iov_len
1284 };
1285 spin_unlock(&ses->ses_lock);
1286 cifs_server_lock(server);
1287 smb311_update_preauth_hash(ses, server, &iov, 1);
1288 cifs_server_unlock(server);
1289 spin_lock(&ses->ses_lock);
1290 }
1291 spin_unlock(&ses->ses_lock);
1292
1293 out:
1294 /*
1295 * This will dequeue all mids. After this it is important that the
1296 * demultiplex_thread will not process any of these mids any further.
1297 * This is prevented above by using a noop callback that will not
1298 * wake this thread except for the very last PDU.
1299 */
1300 for (i = 0; i < num_rqst; i++) {
1301 if (!cancelled_mid[i])
1302 delete_mid(midQ[i]);
1303 }
1304
1305 return rc;
1306 }
1307
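/*
 * cifs_send_recv - send a single (non-compounded) request and wait for its
 * response; a thin wrapper around compound_send_recv() with num_rqst == 1.
 *
 * Minimal usage sketch (illustrative only -- real callers build the iovecs
 * through the protocol helpers and pick resp_buf_type/flags to match):
 *
 *	struct smb_rqst rqst = { .rq_iov = iov, .rq_nvec = n_vec };
 *	struct kvec rsp_iov;
 *	int resp_buftype;
 *	int rc;
 *
 *	rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buftype,
 *			    CIFS_LOG_ERROR, &rsp_iov);
 *	if (!rc)
 *		free_rsp_buf(resp_buftype, rsp_iov.iov_base);
 */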
1308 int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
1313 {
1314 return compound_send_recv(xid, ses, server, flags, 1,
1315 rqst, resp_buf_type, resp_iov);
1316 }
1317
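/*
 * SMB1 send/receive for callers whose first iovec starts with the 4-byte
 * RFC1002 length: that prefix is split into its own iovec here so the rest
 * of the transport sees the usual "length marker + body" layout.
 */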
1318 int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
1322 {
1323 struct smb_rqst rqst;
1324 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1325 int rc;
1326
1327 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1328 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1329 GFP_KERNEL);
1330 if (!new_iov) {
1331 /* otherwise cifs_send_recv below sets resp_buf_type */
1332 *resp_buf_type = CIFS_NO_BUFFER;
1333 return -ENOMEM;
1334 }
1335 } else
1336 new_iov = s_iov;
1337
1338 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1339 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1340
1341 new_iov[0].iov_base = new_iov[1].iov_base;
1342 new_iov[0].iov_len = 4;
1343 new_iov[1].iov_base += 4;
1344 new_iov[1].iov_len -= 4;
1345
1346 memset(&rqst, 0, sizeof(struct smb_rqst));
1347 rqst.rq_iov = new_iov;
1348 rqst.rq_nvec = n_vec + 1;
1349
1350 rc = cifs_send_recv(xid, ses, ses->server,
1351 &rqst, resp_buf_type, flags, resp_iov);
1352 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1353 kfree(new_iov);
1354 return rc;
1355 }
1356
1357 int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
1361 {
1362 int rc = 0;
1363 struct mid_q_entry *midQ;
1364 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1365 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1366 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1367 struct cifs_credits credits = { .value = 1, .instance = 0 };
1368 struct TCP_Server_Info *server;
1369
1370 if (ses == NULL) {
1371 cifs_dbg(VFS, "Null smb session\n");
1372 return -EIO;
1373 }
1374 server = ses->server;
1375 if (server == NULL) {
1376 cifs_dbg(VFS, "Null tcp session\n");
1377 return -EIO;
1378 }
1379
1380 spin_lock(&server->srv_lock);
1381 if (server->tcpStatus == CifsExiting) {
1382 spin_unlock(&server->srv_lock);
1383 return -ENOENT;
1384 }
1385 spin_unlock(&server->srv_lock);
1386
1387 /* Ensure that we do not send more than 50 overlapping requests
1388 to the same server. We may make this configurable later or
1389 use ses->maxReq */
1390
1391 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1392 cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1393 len);
1394 return -EIO;
1395 }
1396
1397 rc = wait_for_free_request(server, flags, &credits.instance);
1398 if (rc)
1399 return rc;
1400
1401 /* make sure that we sign in the same order that we send on this socket
1402 and avoid races inside tcp sendmsg code that could cause corruption
1403 of smb data */
1404
1405 cifs_server_lock(server);
1406
1407 rc = allocate_mid(ses, in_buf, &midQ);
1408 if (rc) {
1409 cifs_server_unlock(server);
1410 /* Update # of requests on wire to server */
1411 add_credits(server, &credits, 0);
1412 return rc;
1413 }
1414
1415 rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1416 if (rc) {
1417 cifs_server_unlock(server);
1418 goto out;
1419 }
1420
1421 midQ->mid_state = MID_REQUEST_SUBMITTED;
1422
1423 rc = smb_send(server, in_buf, len);
1424 cifs_save_when_sent(midQ);
1425
1426 if (rc < 0)
1427 server->sequence_number -= 2;
1428
1429 cifs_server_unlock(server);
1430
1431 if (rc < 0)
1432 goto out;
1433
1434 rc = wait_for_response(server, midQ);
1435 if (rc != 0) {
1436 send_cancel(server, &rqst, midQ);
1437 spin_lock(&server->mid_lock);
1438 if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
1439 midQ->mid_state == MID_RESPONSE_RECEIVED) {
1440 /* no longer considered to be "in-flight" */
1441 midQ->callback = release_mid;
1442 spin_unlock(&server->mid_lock);
1443 add_credits(server, &credits, 0);
1444 return rc;
1445 }
1446 spin_unlock(&server->mid_lock);
1447 }
1448
1449 rc = cifs_sync_mid_result(midQ, server);
1450 if (rc != 0) {
1451 add_credits(server, &credits, 0);
1452 return rc;
1453 }
1454
1455 if (!midQ->resp_buf || !out_buf ||
1456 midQ->mid_state != MID_RESPONSE_READY) {
1457 rc = -EIO;
1458 cifs_server_dbg(VFS, "Bad MID state?\n");
1459 goto out;
1460 }
1461
1462 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1463 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1464 rc = cifs_check_receive(midQ, server, 0);
1465 out:
1466 delete_mid(midQ);
1467 add_credits(server, &credits, 0);
1468
1469 return rc;
1470 }
1471
1472 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1473 blocking lock to return. */
1474
1475 static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
1479 {
1480 int bytes_returned;
1481 struct cifs_ses *ses = tcon->ses;
1482 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1483
1484 /* We just modify the current in_buf to change
1485 the type of lock from LOCKING_ANDX_SHARED_LOCK
1486 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1487 LOCKING_ANDX_CANCEL_LOCK. */
1488
1489 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1490 pSMB->Timeout = 0;
1491 pSMB->hdr.Mid = get_next_mid(ses->server);
1492
1493 return SendReceive(xid, ses, in_buf, out_buf,
1494 &bytes_returned, 0);
1495 }
1496
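/*
 * Synchronous send/receive for SMB1 blocking lock requests. The wait is
 * interruptible; if the caller is signalled while the server still holds
 * the blocking lock, a cancel (NT_CANCEL for POSIX/Transaction2 locks,
 * LOCKINGX_CANCEL_LOCK otherwise) is sent so the server lets go of the
 * blocked request, and the syscall is restarted (-ERESTARTSYS) when the
 * cancelled lock comes back as -EACCES.
 */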
1497 int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
1501 {
1502 int rc = 0;
1503 int rstart = 0;
1504 struct mid_q_entry *midQ;
1505 struct cifs_ses *ses;
1506 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1507 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1508 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1509 unsigned int instance;
1510 struct TCP_Server_Info *server;
1511
1512 if (tcon == NULL || tcon->ses == NULL) {
1513 cifs_dbg(VFS, "Null smb session\n");
1514 return -EIO;
1515 }
1516 ses = tcon->ses;
1517 server = ses->server;
1518
1519 if (server == NULL) {
1520 cifs_dbg(VFS, "Null tcp session\n");
1521 return -EIO;
1522 }
1523
1524 spin_lock(&server->srv_lock);
1525 if (server->tcpStatus == CifsExiting) {
1526 spin_unlock(&server->srv_lock);
1527 return -ENOENT;
1528 }
1529 spin_unlock(&server->srv_lock);
1530
1531 /* Ensure that we do not send more than 50 overlapping requests
1532 to the same server. We may make this configurable later or
1533 use ses->maxReq */
1534
1535 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1536 cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1537 len);
1538 return -EIO;
1539 }
1540
1541 rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1542 if (rc)
1543 return rc;
1544
1545 /* make sure that we sign in the same order that we send on this socket
1546 and avoid races inside tcp sendmsg code that could cause corruption
1547 of smb data */
1548
1549 cifs_server_lock(server);
1550
1551 rc = allocate_mid(ses, in_buf, &midQ);
1552 if (rc) {
1553 cifs_server_unlock(server);
1554 return rc;
1555 }
1556
1557 rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1558 if (rc) {
1559 delete_mid(midQ);
1560 cifs_server_unlock(server);
1561 return rc;
1562 }
1563
1564 midQ->mid_state = MID_REQUEST_SUBMITTED;
1565 rc = smb_send(server, in_buf, len);
1566 cifs_save_when_sent(midQ);
1567
1568 if (rc < 0)
1569 server->sequence_number -= 2;
1570
1571 cifs_server_unlock(server);
1572
1573 if (rc < 0) {
1574 delete_mid(midQ);
1575 return rc;
1576 }
1577
1578 /* Wait for a reply - allow signals to interrupt. */
1579 rc = wait_event_interruptible(server->response_q,
1580 (!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
1581 midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
1582 ((server->tcpStatus != CifsGood) &&
1583 (server->tcpStatus != CifsNew)));
1584
1585 /* Were we interrupted by a signal ? */
1586 spin_lock(&server->srv_lock);
1587 if ((rc == -ERESTARTSYS) &&
1588 (midQ->mid_state == MID_REQUEST_SUBMITTED ||
1589 midQ->mid_state == MID_RESPONSE_RECEIVED) &&
1590 ((server->tcpStatus == CifsGood) ||
1591 (server->tcpStatus == CifsNew))) {
1592 spin_unlock(&server->srv_lock);
1593
1594 if (in_buf->Command == SMB_COM_TRANSACTION2) {
1595 /* POSIX lock. We send a NT_CANCEL SMB to cause the
1596 blocking lock to return. */
1597 rc = send_cancel(server, &rqst, midQ);
1598 if (rc) {
1599 delete_mid(midQ);
1600 return rc;
1601 }
1602 } else {
1603 /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1604 to cause the blocking lock to return. */
1605
1606 rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1607
1608 /* If we get -ENOLCK back the lock may have
1609 already been removed. Don't exit in this case. */
1610 if (rc && rc != -ENOLCK) {
1611 delete_mid(midQ);
1612 return rc;
1613 }
1614 }
1615
1616 rc = wait_for_response(server, midQ);
1617 if (rc) {
1618 send_cancel(server, &rqst, midQ);
1619 spin_lock(&server->mid_lock);
1620 if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
1621 midQ->mid_state == MID_RESPONSE_RECEIVED) {
1622 /* no longer considered to be "in-flight" */
1623 midQ->callback = release_mid;
1624 spin_unlock(&server->mid_lock);
1625 return rc;
1626 }
1627 spin_unlock(&server->mid_lock);
1628 }
1629
1630 /* We got the response - restart system call. */
1631 rstart = 1;
1632 spin_lock(&server->srv_lock);
1633 }
1634 spin_unlock(&server->srv_lock);
1635
1636 rc = cifs_sync_mid_result(midQ, server);
1637 if (rc != 0)
1638 return rc;
1639
1640 /* rcvd frame is ok */
1641 if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
1642 rc = -EIO;
1643 cifs_tcon_dbg(VFS, "Bad MID state?\n");
1644 goto out;
1645 }
1646
1647 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1648 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1649 rc = cifs_check_receive(midQ, server, 0);
1650 out:
1651 delete_mid(midQ);
1652 if (rstart && rc == -EACCES)
1653 return -ERESTARTSYS;
1654 return rc;
1655 }
1656
1657 /*
1658 * Discard any remaining data in the current SMB. To do this, we borrow the
1659 * current bigbuf.
1660 */
1661 int
cifs_discard_remaining_data(struct TCP_Server_Info *server)
1663 {
1664 unsigned int rfclen = server->pdu_size;
1665 size_t remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
1666 server->total_read;
1667
1668 while (remaining > 0) {
1669 ssize_t length;
1670
1671 length = cifs_discard_from_socket(server,
1672 min_t(size_t, remaining,
1673 CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
1674 if (length < 0)
1675 return length;
1676 server->total_read += length;
1677 remaining -= length;
1678 }
1679
1680 return 0;
1681 }
1682
1683 static int
__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		     bool malformed)
1686 {
1687 int length;
1688
1689 length = cifs_discard_remaining_data(server);
1690 dequeue_mid(mid, malformed);
1691 mid->resp_buf = server->smallbuf;
1692 server->smallbuf = NULL;
1693 return length;
1694 }
1695
1696 static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1698 {
1699 struct cifs_io_subrequest *rdata = mid->callback_data;
1700
1701 return __cifs_readv_discard(server, mid, rdata->result);
1702 }
1703
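/*
 * Receive handler for async reads: pull in the remainder of the READ
 * response header, validate the data offset/length advertised by the
 * server, then copy the payload straight into rdata->subreq.io_iter (or
 * skip the copy when an RDMA read has already placed the data). Anything
 * malformed is discarded so the transport stays in sync.
 */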
1704 int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1706 {
1707 int length, len;
1708 unsigned int data_offset, data_len;
1709 struct cifs_io_subrequest *rdata = mid->callback_data;
1710 char *buf = server->smallbuf;
1711 unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
1712 bool use_rdma_mr = false;
1713
1714 cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%zu\n",
1715 __func__, mid->mid, rdata->subreq.start, rdata->subreq.len);
1716
1717 /*
1718 * read the rest of READ_RSP header (sans Data array), or whatever we
1719 * can if there's not enough data. At this point, we've read down to
1720 * the Mid.
1721 */
1722 len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
1723 HEADER_SIZE(server) + 1;
1724
1725 length = cifs_read_from_socket(server,
1726 buf + HEADER_SIZE(server) - 1, len);
1727 if (length < 0)
1728 return length;
1729 server->total_read += length;
1730
1731 if (server->ops->is_session_expired &&
1732 server->ops->is_session_expired(buf)) {
1733 cifs_reconnect(server, true);
1734 return -1;
1735 }
1736
1737 if (server->ops->is_status_pending &&
1738 server->ops->is_status_pending(buf, server)) {
1739 cifs_discard_remaining_data(server);
1740 return -1;
1741 }
1742
1743 /* set up first two iov for signature check and to get credits */
1744 rdata->iov[0].iov_base = buf;
1745 rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
1746 rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
1747 rdata->iov[1].iov_len =
1748 server->total_read - HEADER_PREAMBLE_SIZE(server);
1749 cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
1750 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
1751 cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
1752 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
1753
1754 /* Was the SMB read successful? */
1755 rdata->result = server->ops->map_error(buf, false);
1756 if (rdata->result != 0) {
1757 cifs_dbg(FYI, "%s: server returned error %d\n",
1758 __func__, rdata->result);
1759 /* normal error on read response */
1760 return __cifs_readv_discard(server, mid, false);
1761 }
1762
1763 /* Is there enough to get to the rest of the READ_RSP header? */
1764 if (server->total_read < server->vals->read_rsp_size) {
1765 cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
1766 __func__, server->total_read,
1767 server->vals->read_rsp_size);
1768 rdata->result = -EIO;
1769 return cifs_readv_discard(server, mid);
1770 }
1771
1772 data_offset = server->ops->read_data_offset(buf) +
1773 HEADER_PREAMBLE_SIZE(server);
1774 if (data_offset < server->total_read) {
1775 /*
1776 * win2k8 sometimes sends an offset of 0 when the read
1777 * is beyond the EOF. Treat it as if the data starts just after
1778 * the header.
1779 */
1780 cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
1781 __func__, data_offset);
1782 data_offset = server->total_read;
1783 } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
1784 /* data_offset is beyond the end of smallbuf */
1785 cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
1786 __func__, data_offset);
1787 rdata->result = -EIO;
1788 return cifs_readv_discard(server, mid);
1789 }
1790
1791 cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
1792 __func__, server->total_read, data_offset);
1793
1794 len = data_offset - server->total_read;
1795 if (len > 0) {
1796 /* read any junk before data into the rest of smallbuf */
1797 length = cifs_read_from_socket(server,
1798 buf + server->total_read, len);
1799 if (length < 0)
1800 return length;
1801 server->total_read += length;
1802 }
1803
1804 /* how much data is in the response? */
1805 #ifdef CONFIG_CIFS_SMB_DIRECT
1806 use_rdma_mr = rdata->mr;
1807 #endif
1808 data_len = server->ops->read_data_length(buf, use_rdma_mr);
1809 if (!use_rdma_mr && (data_offset + data_len > buflen)) {
1810 /* data_len is corrupt -- discard frame */
1811 rdata->result = -EIO;
1812 return cifs_readv_discard(server, mid);
1813 }
1814
1815 #ifdef CONFIG_CIFS_SMB_DIRECT
1816 if (rdata->mr)
1817 length = data_len; /* An RDMA read is already done. */
1818 else
1819 #endif
1820 length = cifs_read_iter_from_socket(server, &rdata->subreq.io_iter,
1821 data_len);
1822 if (length > 0)
1823 rdata->got_bytes += length;
1824 server->total_read += length;
1825
1826 cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
1827 server->total_read, buflen, data_len);
1828
1829 /* discard anything left over */
1830 if (server->total_read < buflen)
1831 return cifs_readv_discard(server, mid);
1832
1833 dequeue_mid(mid, false);
1834 mid->resp_buf = server->smallbuf;
1835 server->smallbuf = NULL;
1836 return length;
1837 }
1838