1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2009, 2013
5 * Etersoft, 2012
6 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Pavel Shilovsky (pshilovsky@samba.org) 2012
8 *
9 * Contains the routines for constructing the SMB2 PDUs themselves
10 *
11 */
12
/* SMB2 PDU handling routines here - except for leftovers (e.g. session setup) */
/* Note that there are handle-based routines which must be */
/* treated slightly differently for reconnection purposes since we never */
/* want to reuse a stale file handle and only the caller knows the file info */
17
18 #include <linux/fs.h>
19 #include <linux/kernel.h>
20 #include <linux/vfs.h>
21 #include <linux/task_io_accounting_ops.h>
22 #include <linux/uaccess.h>
23 #include <linux/uuid.h>
24 #include <linux/pagemap.h>
25 #include <linux/xattr.h>
26 #include <linux/netfs.h>
27 #include <trace/events/netfs.h>
28 #include "cifsglob.h"
29 #include "cifsacl.h"
30 #include "cifsproto.h"
31 #include "smb2proto.h"
32 #include "cifs_unicode.h"
33 #include "cifs_debug.h"
34 #include "ntlmssp.h"
35 #include "../common/smb2status.h"
36 #include "smb2glob.h"
37 #include "cifspdu.h"
38 #include "cifs_spnego.h"
39 #include "smbdirect.h"
40 #include "trace.h"
41 #ifdef CONFIG_CIFS_DFS_UPCALL
42 #include "dfs_cache.h"
43 #endif
44 #include "cached_dir.h"
45 #include "compress.h"
46
47 /*
48 * The following table defines the expected "StructureSize" of SMB2 requests
49 * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
50 *
51 * Note that commands are defined in smb2pdu.h in le16 but the array below is
52 * indexed by command in host byte order.
53 */
54 static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
55 /* SMB2_NEGOTIATE */ 36,
56 /* SMB2_SESSION_SETUP */ 25,
57 /* SMB2_LOGOFF */ 4,
58 /* SMB2_TREE_CONNECT */ 9,
59 /* SMB2_TREE_DISCONNECT */ 4,
60 /* SMB2_CREATE */ 57,
61 /* SMB2_CLOSE */ 24,
62 /* SMB2_FLUSH */ 24,
63 /* SMB2_READ */ 49,
64 /* SMB2_WRITE */ 49,
65 /* SMB2_LOCK */ 48,
66 /* SMB2_IOCTL */ 57,
67 /* SMB2_CANCEL */ 4,
68 /* SMB2_ECHO */ 4,
69 /* SMB2_QUERY_DIRECTORY */ 33,
70 /* SMB2_CHANGE_NOTIFY */ 32,
71 /* SMB2_QUERY_INFO */ 41,
72 /* SMB2_SET_INFO */ 33,
73 /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
74 };
75
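/*
 * Returns 1 when SMB3 encryption (sealing) must be used for traffic on this
 * tree connection: the session or share was flagged for encryption by the
 * server, the mount requested "seal" and the server advertises encryption
 * support, or the global security flags mandate sealing.
 */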
int smb3_encryption_required(const struct cifs_tcon *tcon)
77 {
78 if (!tcon || !tcon->ses)
79 return 0;
80 if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
81 (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
82 return 1;
83 if (tcon->seal &&
84 (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
85 return 1;
86 if (((global_secflags & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL) &&
87 (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
88 return 1;
89 return 0;
90 }
91
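/*
 * Fill in the fixed 64-byte SMB2 header of an outgoing request: protocol id,
 * command, a small credit request (up to 10), the caller's pid, and (when a
 * tcon is supplied) tree id, session id, credit charge and the signing flag.
 * For SMB3+ dialects the channel sequence number is also stamped into the
 * header.
 */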
92 static void
smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
94 const struct cifs_tcon *tcon,
95 struct TCP_Server_Info *server)
96 {
97 struct smb3_hdr_req *smb3_hdr;
98
99 shdr->ProtocolId = SMB2_PROTO_NUMBER;
100 shdr->StructureSize = cpu_to_le16(64);
101 shdr->Command = smb2_cmd;
102
103 if (server) {
104 /* After reconnect SMB3 must set ChannelSequence on subsequent reqs */
105 if (server->dialect >= SMB30_PROT_ID) {
106 smb3_hdr = (struct smb3_hdr_req *)shdr;
/*
 * secondary channels use the primary channel's channel
 * sequence number; otherwise use this server's own
 */
111 if (SERVER_IS_CHAN(server))
112 smb3_hdr->ChannelSequence =
113 cpu_to_le16(server->primary_server->channel_sequence_num);
114 else
115 smb3_hdr->ChannelSequence =
116 cpu_to_le16(server->channel_sequence_num);
117 }
118 spin_lock(&server->req_lock);
119 /* Request up to 10 credits but don't go over the limit. */
120 if (server->credits >= server->max_credits)
121 shdr->CreditRequest = cpu_to_le16(0);
122 else
123 shdr->CreditRequest = cpu_to_le16(
124 min_t(int, server->max_credits -
125 server->credits, 10));
126 spin_unlock(&server->req_lock);
127 } else {
128 shdr->CreditRequest = cpu_to_le16(2);
129 }
130 shdr->Id.SyncId.ProcessId = cpu_to_le32((__u16)current->tgid);
131
132 if (!tcon)
133 goto out;
134
135 /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
136 /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
137 if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
138 shdr->CreditCharge = cpu_to_le16(1);
139 /* else CreditCharge MBZ */
140
141 shdr->Id.SyncId.TreeId = cpu_to_le32(tcon->tid);
142 /* Uid is not converted */
143 if (tcon->ses)
144 shdr->SessionId = cpu_to_le64(tcon->ses->Suid);
145
/*
 * If we were to set SMB2_FLAGS_DFS_OPERATIONS on open, we would also have
 * to pass the path in the Open SMB prefixed by \\server\share.
 * Not sure when we would need to do the augmented path (if ever), and
 * setting this flag breaks the SMB2 open operation since it is
 * illegal to send an empty path name (without the \\server\share prefix)
 * when the DFS flag is set in the SMB open header. We could
 * consider setting the flag on all operations other than open
 * but it is safer not to set it for now.
 */
/* if (tcon->share_flags & SHI1005_FLAGS_DFS)
	shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */
158
159 if (server && server->sign && !smb3_encryption_required(tcon))
160 shdr->Flags |= SMB2_FLAGS_SIGNED;
161 out:
162 return;
163 }
164
/*
 * Reconnect helper: skip this secondary channel (or disable all secondary
 * channels) when the server no longer supports multichannel.
 */
166 static int
cifs_chan_skip_or_disable(struct cifs_ses *ses,
168 struct TCP_Server_Info *server,
169 bool from_reconnect)
170 {
171 struct TCP_Server_Info *pserver;
172 unsigned int chan_index;
173
174 if (SERVER_IS_CHAN(server)) {
175 cifs_dbg(VFS,
176 "server %s does not support multichannel anymore. Skip secondary channel\n",
177 ses->server->hostname);
178
179 spin_lock(&ses->chan_lock);
180 chan_index = cifs_ses_get_chan_index(ses, server);
181 if (chan_index == CIFS_INVAL_CHAN_INDEX) {
182 spin_unlock(&ses->chan_lock);
183 goto skip_terminate;
184 }
185
186 ses->chans[chan_index].server = NULL;
187 server->terminate = true;
188 spin_unlock(&ses->chan_lock);
189
190 /*
191 * the above reference of server by channel
192 * needs to be dropped without holding chan_lock
193 * as cifs_put_tcp_session takes a higher lock
194 * i.e. cifs_tcp_ses_lock
195 */
196 cifs_put_tcp_session(server, from_reconnect);
197
198 cifs_signal_cifsd_for_reconnect(server, false);
199
200 /* mark primary server as needing reconnect */
201 pserver = server->primary_server;
202 cifs_signal_cifsd_for_reconnect(pserver, false);
203 skip_terminate:
204 return -EHOSTDOWN;
205 }
206
207 cifs_server_dbg(VFS,
208 "server does not support multichannel anymore. Disable all other channels\n");
209 cifs_disable_secondary_channels(ses);
210
211
212 return 0;
213 }
214
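/*
 * Called before building most SMB2 requests. If the transport, session or
 * tree connection has dropped, wait for cifsd to reconnect the socket, then
 * renegotiate, redo session setup and tree connect as needed. Handle-based
 * commands get -EAGAIN so the caller can reopen the file and retry.
 */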
215 static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
217 struct TCP_Server_Info *server, bool from_reconnect)
218 {
219 int rc = 0;
220 struct nls_table *nls_codepage = NULL;
221 struct cifs_ses *ses;
222 int xid;
223
/*
 * SMB2 NEGOTIATE, SESSION_SETUP and LOGOFF do not have a tcon yet, so
 * the TCP and SMB session status checks for those three are done
 * differently - in the calling routine.
 */
229 if (tcon == NULL)
230 return 0;
231
232 /*
233 * Need to also skip SMB2_IOCTL because it is used for checking nested dfs links in
234 * cifs_tree_connect().
235 */
236 if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
237 return 0;
238
239 spin_lock(&tcon->tc_lock);
240 if (tcon->status == TID_EXITING) {
241 /*
242 * only tree disconnect allowed when disconnecting ...
243 */
244 if (smb2_command != SMB2_TREE_DISCONNECT) {
245 spin_unlock(&tcon->tc_lock);
246 cifs_dbg(FYI, "can not send cmd %d while umounting\n",
247 smb2_command);
248 return -ENODEV;
249 }
250 }
251 spin_unlock(&tcon->tc_lock);
252
253 ses = tcon->ses;
254 if (!ses)
255 return -EIO;
256 spin_lock(&ses->ses_lock);
257 if (ses->ses_status == SES_EXITING) {
258 spin_unlock(&ses->ses_lock);
259 return -EIO;
260 }
261 spin_unlock(&ses->ses_lock);
262 if (!ses->server || !server)
263 return -EIO;
264
265 spin_lock(&server->srv_lock);
266 if (server->tcpStatus == CifsNeedReconnect) {
267 /*
268 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
269 * here since they are implicitly done when session drops.
270 */
271 switch (smb2_command) {
272 /*
273 * BB Should we keep oplock break and add flush to exceptions?
274 */
275 case SMB2_TREE_DISCONNECT:
276 case SMB2_CANCEL:
277 case SMB2_CLOSE:
278 case SMB2_OPLOCK_BREAK:
279 spin_unlock(&server->srv_lock);
280 return -EAGAIN;
281 }
282 }
283
284 /* if server is marked for termination, cifsd will cleanup */
285 if (server->terminate) {
286 spin_unlock(&server->srv_lock);
287 return -EHOSTDOWN;
288 }
289 spin_unlock(&server->srv_lock);
290
291 again:
292 rc = cifs_wait_for_server_reconnect(server, tcon->retry);
293 if (rc)
294 return rc;
295
296 spin_lock(&ses->chan_lock);
297 if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
298 spin_unlock(&ses->chan_lock);
299 return 0;
300 }
301 spin_unlock(&ses->chan_lock);
302 cifs_dbg(FYI, "sess reconnect mask: 0x%lx, tcon reconnect: %d",
303 tcon->ses->chans_need_reconnect,
304 tcon->need_reconnect);
305
306 mutex_lock(&ses->session_mutex);
/*
 * If this is called by delayed work while the channel has been disabled
 * in parallel, the delayed work can still be executing, so there is a
 * chance that this channel no longer exists.
 */
312 spin_lock(&server->srv_lock);
313 if (server->tcpStatus == CifsExiting) {
314 spin_unlock(&server->srv_lock);
315 mutex_unlock(&ses->session_mutex);
316 rc = -EHOSTDOWN;
317 goto out;
318 }
319
/*
 * Recheck after acquiring the mutex. If another thread is negotiating
 * and the server never sends an answer, the socket will be closed
 * and tcpStatus set to reconnect.
 */
325 if (server->tcpStatus == CifsNeedReconnect) {
326 spin_unlock(&server->srv_lock);
327 mutex_unlock(&ses->session_mutex);
328
329 if (tcon->retry)
330 goto again;
331
332 rc = -EHOSTDOWN;
333 goto out;
334 }
335 spin_unlock(&server->srv_lock);
336
337 nls_codepage = ses->local_nls;
338
339 /*
340 * need to prevent multiple threads trying to simultaneously
341 * reconnect the same SMB session
342 */
343 spin_lock(&ses->ses_lock);
344 spin_lock(&ses->chan_lock);
345 if (!cifs_chan_needs_reconnect(ses, server) &&
346 ses->ses_status == SES_GOOD) {
347 spin_unlock(&ses->chan_lock);
348 spin_unlock(&ses->ses_lock);
349 /* this means that we only need to tree connect */
350 if (tcon->need_reconnect)
351 goto skip_sess_setup;
352
353 mutex_unlock(&ses->session_mutex);
354 goto out;
355 }
356 spin_unlock(&ses->chan_lock);
357 spin_unlock(&ses->ses_lock);
358
359 rc = cifs_negotiate_protocol(0, ses, server);
360 if (!rc) {
361 /*
362 * if server stopped supporting multichannel
363 * and the first channel reconnected, disable all the others.
364 */
365 if (ses->chan_count > 1 &&
366 !(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
367 rc = cifs_chan_skip_or_disable(ses, server,
368 from_reconnect);
369 if (rc) {
370 mutex_unlock(&ses->session_mutex);
371 goto out;
372 }
373 }
374
375 rc = cifs_setup_session(0, ses, server, nls_codepage);
376 if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
/*
 * Try the alternate password on the next reconnect (e.g. key
 * rotation could be enabled on the server) if one is available
 * and the current password has expired, but do not swap passwords
 * on non-password-related errors such as host down.
 */
383 if (ses->password2)
384 swap(ses->password2, ses->password);
385 }
386
387 if ((rc == -EACCES) && !tcon->retry) {
388 mutex_unlock(&ses->session_mutex);
389 rc = -EHOSTDOWN;
390 goto failed;
391 } else if (rc) {
392 mutex_unlock(&ses->session_mutex);
393 goto out;
394 }
395 } else {
396 mutex_unlock(&ses->session_mutex);
397 goto out;
398 }
399
400 skip_sess_setup:
401 if (!tcon->need_reconnect) {
402 mutex_unlock(&ses->session_mutex);
403 goto out;
404 }
405 cifs_mark_open_files_invalid(tcon);
406 if (tcon->use_persistent)
407 tcon->need_reopen_files = true;
408
409 rc = cifs_tree_connect(0, tcon, nls_codepage);
410
411 cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
412 if (rc) {
413 /* If sess reconnected but tcon didn't, something strange ... */
414 mutex_unlock(&ses->session_mutex);
415 cifs_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
416 goto out;
417 }
418
419 spin_lock(&ses->ses_lock);
420 if (ses->flags & CIFS_SES_FLAG_SCALE_CHANNELS) {
421 spin_unlock(&ses->ses_lock);
422 mutex_unlock(&ses->session_mutex);
423 goto skip_add_channels;
424 }
425 ses->flags |= CIFS_SES_FLAG_SCALE_CHANNELS;
426 spin_unlock(&ses->ses_lock);
427
428 if (!rc &&
429 (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL) &&
430 server->ops->query_server_interfaces) {
431 mutex_unlock(&ses->session_mutex);
432
433 /*
434 * query server network interfaces, in case they change
435 */
436 xid = get_xid();
437 rc = server->ops->query_server_interfaces(xid, tcon, false);
438 free_xid(xid);
439
440 if (rc == -EOPNOTSUPP && ses->chan_count > 1) {
/*
 * Some servers (e.g. the Azure SMB server) do not advertise
 * that multichannel has been disabled via the server
 * capabilities, but instead return STATUS_NOT_IMPLEMENTED.
 * Treat this as the server not supporting multichannel.
 */
447
448 rc = cifs_chan_skip_or_disable(ses, server,
449 from_reconnect);
450 goto skip_add_channels;
451 } else if (rc)
452 cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
453 __func__, rc);
454
455 if (ses->chan_max > ses->chan_count &&
456 ses->iface_count &&
457 !SERVER_IS_CHAN(server)) {
458 if (ses->chan_count == 1) {
459 cifs_server_dbg(VFS, "supports multichannel now\n");
460 queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
461 (SMB_INTERFACE_POLL_INTERVAL * HZ));
462 }
463
464 cifs_try_adding_channels(ses);
465 }
466 } else {
467 mutex_unlock(&ses->session_mutex);
468 }
469
470 skip_add_channels:
471 spin_lock(&ses->ses_lock);
472 ses->flags &= ~CIFS_SES_FLAG_SCALE_CHANNELS;
473 spin_unlock(&ses->ses_lock);
474
475 if (smb2_command != SMB2_INTERNAL_CMD)
476 mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
477
478 atomic_inc(&tconInfoReconnectCount);
479 out:
/*
 * Check if this is a handle-based operation, so we know whether we can
 * continue or must return to the caller to reset the file handle.
 */
484 /*
485 * BB Is flush done by server on drop of tcp session? Should we special
486 * case it and skip above?
487 */
488 switch (smb2_command) {
489 case SMB2_FLUSH:
490 case SMB2_READ:
491 case SMB2_WRITE:
492 case SMB2_LOCK:
493 case SMB2_QUERY_DIRECTORY:
494 case SMB2_CHANGE_NOTIFY:
495 case SMB2_QUERY_INFO:
496 case SMB2_SET_INFO:
497 rc = -EAGAIN;
498 }
499 failed:
500 return rc;
501 }
502
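/*
 * Zero the fixed portion of the request buffer, assemble the SMB2 header and
 * set StructureSize2 from the per-command table above; the total fixed length
 * (header plus command-specific fixed area) is returned via total_len.
 */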
503 static void
fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
505 struct TCP_Server_Info *server,
506 void *buf,
507 unsigned int *total_len)
508 {
509 struct smb2_pdu *spdu = buf;
/* look up the word count, i.e. StructureSize, from the table */
511 __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];
512
513 /*
514 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
515 * largest operations (Create)
516 */
517 memset(buf, 0, 256);
518
519 smb2_hdr_assemble(&spdu->hdr, smb2_command, tcon, server);
520 spdu->StructureSize2 = cpu_to_le16(parmsize);
521
522 *total_len = parmsize + sizeof(struct smb2_hdr);
523 }
524
/*
 * Allocate and return a pointer to an SMB request hdr, and set basic
 * SMB information in the SMB header. If the return code is zero, this
 * function must have filled in the request_buf pointer.
 */
static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
531 struct TCP_Server_Info *server,
532 void **request_buf, unsigned int *total_len)
533 {
534 /* BB eventually switch this to SMB2 specific small buf size */
535 switch (smb2_command) {
536 case SMB2_SET_INFO:
537 case SMB2_QUERY_INFO:
538 *request_buf = cifs_buf_get();
539 break;
540 default:
541 *request_buf = cifs_small_buf_get();
542 break;
543 }
544 if (*request_buf == NULL) {
545 /* BB should we add a retry in here if not a writepage? */
546 return -ENOMEM;
547 }
548
549 fill_small_buf(smb2_command, tcon, server,
550 (struct smb2_hdr *)(*request_buf),
551 total_len);
552
553 if (tcon != NULL) {
554 uint16_t com_code = le16_to_cpu(smb2_command);
555 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
556 cifs_stats_inc(&tcon->num_smbs_sent);
557 }
558
559 return 0;
560 }
561
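/*
 * Same as __smb2_plain_req_init() but reconnects the session/tree first if
 * needed. Roughly how the worker functions below use it (sketch):
 *
 *	rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
 *				 (void **)&req, &total_len);
 *	if (rc)
 *		return rc;
 *	... fill command-specific fields, send (e.g. via cifs_send_recv()) ...
 *	cifs_small_buf_release(req);
 */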
static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
563 struct TCP_Server_Info *server,
564 void **request_buf, unsigned int *total_len)
565 {
566 int rc;
567
568 rc = smb2_reconnect(smb2_command, tcon, server, false);
569 if (rc)
570 return rc;
571
572 return __smb2_plain_req_init(smb2_command, tcon, server, request_buf,
573 total_len);
574 }
575
static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
577 struct TCP_Server_Info *server,
578 void **request_buf, unsigned int *total_len)
579 {
580 /* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
581 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
582 return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
583 request_buf, total_len);
584 }
585 return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
586 request_buf, total_len);
587 }
588
589 /* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */
590
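/*
 * The build_*_ctxt() helpers below each fill in one negotiate context
 * (preauth integrity, compression, signing, encryption, netname, POSIX
 * extensions) that assemble_neg_contexts() appends to the SMB3.1.1
 * NEGOTIATE request.
 */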
591 static void
build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
593 {
594 pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
595 pneg_ctxt->DataLength = cpu_to_le16(38);
596 pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
597 pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
598 get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
599 pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
600 }
601
602 static void
build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt)
604 {
605 pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
606 pneg_ctxt->DataLength =
607 cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
608 - sizeof(struct smb2_neg_context));
609 pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(3);
610 pneg_ctxt->CompressionAlgorithms[0] = SMB3_COMPRESS_LZ77;
611 pneg_ctxt->CompressionAlgorithms[1] = SMB3_COMPRESS_LZ77_HUFF;
612 pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1;
613 }
614
615 static unsigned int
build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt)
617 {
618 unsigned int ctxt_len = sizeof(struct smb2_signing_capabilities);
619 unsigned short num_algs = 1; /* number of signing algorithms sent */
620
621 pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES;
622 /*
623 * Context Data length must be rounded to multiple of 8 for some servers
624 */
625 pneg_ctxt->DataLength = cpu_to_le16(ALIGN(sizeof(struct smb2_signing_capabilities) -
626 sizeof(struct smb2_neg_context) +
627 (num_algs * sizeof(u16)), 8));
628 pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs);
629 pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC);
630
631 ctxt_len += sizeof(__le16) * num_algs;
632 ctxt_len = ALIGN(ctxt_len, 8);
633 return ctxt_len;
634 /* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */
635 }
636
637 static void
build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
639 {
640 pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
641 if (require_gcm_256) {
642 pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + 1 cipher */
643 pneg_ctxt->CipherCount = cpu_to_le16(1);
644 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES256_GCM;
645 } else if (enable_gcm_256) {
646 pneg_ctxt->DataLength = cpu_to_le16(8); /* Cipher Count + 3 ciphers */
647 pneg_ctxt->CipherCount = cpu_to_le16(3);
648 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
649 pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES256_GCM;
650 pneg_ctxt->Ciphers[2] = SMB2_ENCRYPTION_AES128_CCM;
651 } else {
652 pneg_ctxt->DataLength = cpu_to_le16(6); /* Cipher Count + 2 ciphers */
653 pneg_ctxt->CipherCount = cpu_to_le16(2);
654 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
655 pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
656 }
657 }
658
659 static unsigned int
build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname)
661 {
662 struct nls_table *cp = load_nls_default();
663
664 pneg_ctxt->ContextType = SMB2_NETNAME_NEGOTIATE_CONTEXT_ID;
665
/* copy at most the first 100 bytes of the server name into the NetName field */
667 pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp));
668 /* context size is DataLength + minimal smb2_neg_context */
669 return ALIGN(le16_to_cpu(pneg_ctxt->DataLength) + sizeof(struct smb2_neg_context), 8);
670 }
671
672 static void
build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
674 {
675 pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
676 pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
677 /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
678 pneg_ctxt->Name[0] = 0x93;
679 pneg_ctxt->Name[1] = 0xAD;
680 pneg_ctxt->Name[2] = 0x25;
681 pneg_ctxt->Name[3] = 0x50;
682 pneg_ctxt->Name[4] = 0x9C;
683 pneg_ctxt->Name[5] = 0xB4;
684 pneg_ctxt->Name[6] = 0x11;
685 pneg_ctxt->Name[7] = 0xE7;
686 pneg_ctxt->Name[8] = 0xB4;
687 pneg_ctxt->Name[9] = 0x23;
688 pneg_ctxt->Name[10] = 0x83;
689 pneg_ctxt->Name[11] = 0xDE;
690 pneg_ctxt->Name[12] = 0x96;
691 pneg_ctxt->Name[13] = 0x8B;
692 pneg_ctxt->Name[14] = 0xCD;
693 pneg_ctxt->Name[15] = 0x7C;
694 }
695
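/*
 * Append the negotiate contexts to the NEGOTIATE request: the fixed part is
 * first rounded up to an 8-byte boundary, then the preauth and encryption
 * contexts are always added, followed by netname (when a hostname is known),
 * POSIX extensions, and optionally compression and signing contexts.
 * NegotiateContextOffset/Count and total_len are updated to match.
 */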
696 static void
assemble_neg_contexts(struct smb2_negotiate_req *req,
698 struct TCP_Server_Info *server, unsigned int *total_len)
699 {
700 unsigned int ctxt_len, neg_context_count;
701 struct TCP_Server_Info *pserver;
702 char *pneg_ctxt;
703 char *hostname;
704
705 if (*total_len > 200) {
706 /* In case length corrupted don't want to overrun smb buffer */
707 cifs_server_dbg(VFS, "Bad frame length assembling neg contexts\n");
708 return;
709 }
710
711 /*
712 * round up total_len of fixed part of SMB3 negotiate request to 8
713 * byte boundary before adding negotiate contexts
714 */
715 *total_len = ALIGN(*total_len, 8);
716
717 pneg_ctxt = (*total_len) + (char *)req;
718 req->NegotiateContextOffset = cpu_to_le32(*total_len);
719
720 build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
721 ctxt_len = ALIGN(sizeof(struct smb2_preauth_neg_context), 8);
722 *total_len += ctxt_len;
723 pneg_ctxt += ctxt_len;
724
725 build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
726 ctxt_len = ALIGN(sizeof(struct smb2_encryption_neg_context), 8);
727 *total_len += ctxt_len;
728 pneg_ctxt += ctxt_len;
729
/*
 * Secondary channels don't have the hostname field populated;
 * use the hostname from the primary channel instead.
 */
734 pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
735 cifs_server_lock(pserver);
736 hostname = pserver->hostname;
737 if (hostname && (hostname[0] != 0)) {
738 ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
739 hostname);
740 *total_len += ctxt_len;
741 pneg_ctxt += ctxt_len;
742 neg_context_count = 3;
743 } else
744 neg_context_count = 2;
745 cifs_server_unlock(pserver);
746
747 build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
748 *total_len += sizeof(struct smb2_posix_neg_context);
749 pneg_ctxt += sizeof(struct smb2_posix_neg_context);
750 neg_context_count++;
751
752 if (server->compression.requested) {
753 build_compression_ctxt((struct smb2_compression_capabilities_context *)
754 pneg_ctxt);
755 ctxt_len = ALIGN(sizeof(struct smb2_compression_capabilities_context), 8);
756 *total_len += ctxt_len;
757 pneg_ctxt += ctxt_len;
758 neg_context_count++;
759 }
760
761 if (enable_negotiate_signing) {
762 ctxt_len = build_signing_ctxt((struct smb2_signing_capabilities *)
763 pneg_ctxt);
764 *total_len += ctxt_len;
765 pneg_ctxt += ctxt_len;
766 neg_context_count++;
767 }
768
769 /* check for and add transport_capabilities and signing capabilities */
770 req->NegotiateContextCount = cpu_to_le16(neg_context_count);
771
772 }
773
774 /* If invalid preauth context warn but use what we requested, SHA-512 */
static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
776 {
777 unsigned int len = le16_to_cpu(ctxt->DataLength);
778
779 /*
780 * Caller checked that DataLength remains within SMB boundary. We still
781 * need to confirm that one HashAlgorithms member is accounted for.
782 */
783 if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
784 pr_warn_once("server sent bad preauth context\n");
785 return;
786 } else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
787 pr_warn_once("server sent invalid SaltLength\n");
788 return;
789 }
790 if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
791 pr_warn_once("Invalid SMB3 hash algorithm count\n");
792 if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
793 pr_warn_once("unknown SMB3 hash algorithm\n");
794 }
795
static void decode_compress_ctx(struct TCP_Server_Info *server,
797 struct smb2_compression_capabilities_context *ctxt)
798 {
799 unsigned int len = le16_to_cpu(ctxt->DataLength);
800 __le16 alg;
801
802 server->compression.enabled = false;
803
804 /*
805 * Caller checked that DataLength remains within SMB boundary. We still
806 * need to confirm that one CompressionAlgorithms member is accounted
807 * for.
808 */
809 if (len < 10) {
810 pr_warn_once("server sent bad compression cntxt\n");
811 return;
812 }
813
814 if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
815 pr_warn_once("invalid SMB3 compress algorithm count\n");
816 return;
817 }
818
819 alg = ctxt->CompressionAlgorithms[0];
820
821 /* 'NONE' (0) compressor type is never negotiated */
822 if (alg == 0 || le16_to_cpu(alg) > 3) {
823 pr_warn_once("invalid compression algorithm '%u'\n", alg);
824 return;
825 }
826
827 server->compression.alg = alg;
828 server->compression.enabled = true;
829 }
830
static int decode_encrypt_ctx(struct TCP_Server_Info *server,
832 struct smb2_encryption_neg_context *ctxt)
833 {
834 unsigned int len = le16_to_cpu(ctxt->DataLength);
835
836 cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
837 /*
838 * Caller checked that DataLength remains within SMB boundary. We still
839 * need to confirm that one Cipher flexible array member is accounted
840 * for.
841 */
842 if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
843 pr_warn_once("server sent bad crypto ctxt len\n");
844 return -EINVAL;
845 }
846
847 if (le16_to_cpu(ctxt->CipherCount) != 1) {
848 pr_warn_once("Invalid SMB3.11 cipher count\n");
849 return -EINVAL;
850 }
851 cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
852 if (require_gcm_256) {
853 if (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM) {
854 cifs_dbg(VFS, "Server does not support requested encryption type (AES256 GCM)\n");
855 return -EOPNOTSUPP;
856 }
857 } else if (ctxt->Ciphers[0] == 0) {
/*
 * e.g. if the server only supported AES256_CCM (very unlikely),
 * or supported no encryption types, or had them all disabled.
 * Since GLOBAL_CAP_ENCRYPTION will not be set, if the mount
 * requested encryption ("seal") the checks later on during tree
 * connect will return the proper rc; but if the client did not
 * request seal, we cannot fail here, since the server is allowed
 * to return 0 to indicate no supported cipher.
 */
867 server->cipher_type = 0;
868 server->capabilities &= ~SMB2_GLOBAL_CAP_ENCRYPTION;
869 pr_warn_once("Server does not support requested encryption types\n");
870 return 0;
871 } else if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
872 (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM) &&
873 (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM)) {
874 /* server returned a cipher we didn't ask for */
875 pr_warn_once("Invalid SMB3.11 cipher returned\n");
876 return -EINVAL;
877 }
878 server->cipher_type = ctxt->Ciphers[0];
879 server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
880 return 0;
881 }
882
static void decode_signing_ctx(struct TCP_Server_Info *server,
884 struct smb2_signing_capabilities *pctxt)
885 {
886 unsigned int len = le16_to_cpu(pctxt->DataLength);
887
888 /*
889 * Caller checked that DataLength remains within SMB boundary. We still
890 * need to confirm that one SigningAlgorithms flexible array member is
891 * accounted for.
892 */
893 if ((len < 4) || (len > 16)) {
894 pr_warn_once("server sent bad signing negcontext\n");
895 return;
896 }
897 if (le16_to_cpu(pctxt->SigningAlgorithmCount) != 1) {
898 pr_warn_once("Invalid signing algorithm count\n");
899 return;
900 }
901 if (le16_to_cpu(pctxt->SigningAlgorithms[0]) > 2) {
902 pr_warn_once("unknown signing algorithm\n");
903 return;
904 }
905
906 server->signing_negotiated = true;
907 server->signing_algorithm = le16_to_cpu(pctxt->SigningAlgorithms[0]);
908 cifs_dbg(FYI, "signing algorithm %d chosen\n",
909 server->signing_algorithm);
910 }
911
912
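/*
 * Walk the list of negotiate contexts in an SMB3.1.1 NEGOTIATE response.
 * Each context after the first begins at the next 8-byte aligned offset;
 * dispatch on ContextType and ignore (with a warning) any type we do not
 * recognize.
 */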
static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
914 struct TCP_Server_Info *server,
915 unsigned int len_of_smb)
916 {
917 struct smb2_neg_context *pctx;
918 unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset);
919 unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount);
920 unsigned int len_of_ctxts, i;
921 int rc = 0;
922
923 cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt);
924 if (len_of_smb <= offset) {
925 cifs_server_dbg(VFS, "Invalid response: negotiate context offset\n");
926 return -EINVAL;
927 }
928
929 len_of_ctxts = len_of_smb - offset;
930
931 for (i = 0; i < ctxt_cnt; i++) {
932 int clen;
933 /* check that offset is not beyond end of SMB */
934 if (len_of_ctxts < sizeof(struct smb2_neg_context))
935 break;
936
937 pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
938 clen = sizeof(struct smb2_neg_context)
939 + le16_to_cpu(pctx->DataLength);
940 /*
941 * 2.2.4 SMB2 NEGOTIATE Response
942 * Subsequent negotiate contexts MUST appear at the first 8-byte
943 * aligned offset following the previous negotiate context.
944 */
945 if (i + 1 != ctxt_cnt)
946 clen = ALIGN(clen, 8);
947 if (clen > len_of_ctxts)
948 break;
949
950 if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES)
951 decode_preauth_context(
952 (struct smb2_preauth_neg_context *)pctx);
953 else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES)
954 rc = decode_encrypt_ctx(server,
955 (struct smb2_encryption_neg_context *)pctx);
956 else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES)
957 decode_compress_ctx(server,
958 (struct smb2_compression_capabilities_context *)pctx);
959 else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
960 server->posix_ext_supported = true;
961 else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES)
962 decode_signing_ctx(server,
963 (struct smb2_signing_capabilities *)pctx);
964 else
965 cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
966 le16_to_cpu(pctx->ContextType));
967 if (rc)
968 break;
969
970 offset += clen;
971 len_of_ctxts -= clen;
972 }
973 return rc;
974 }
975
976 static struct create_posix *
create_posix_buf(umode_t mode)
978 {
979 struct create_posix *buf;
980
981 buf = kzalloc(sizeof(struct create_posix),
982 GFP_KERNEL);
983 if (!buf)
984 return NULL;
985
986 buf->ccontext.DataOffset =
987 cpu_to_le16(offsetof(struct create_posix, Mode));
988 buf->ccontext.DataLength = cpu_to_le32(4);
989 buf->ccontext.NameOffset =
990 cpu_to_le16(offsetof(struct create_posix, Name));
991 buf->ccontext.NameLength = cpu_to_le16(16);
992
993 /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
994 buf->Name[0] = 0x93;
995 buf->Name[1] = 0xAD;
996 buf->Name[2] = 0x25;
997 buf->Name[3] = 0x50;
998 buf->Name[4] = 0x9C;
999 buf->Name[5] = 0xB4;
1000 buf->Name[6] = 0x11;
1001 buf->Name[7] = 0xE7;
1002 buf->Name[8] = 0xB4;
1003 buf->Name[9] = 0x23;
1004 buf->Name[10] = 0x83;
1005 buf->Name[11] = 0xDE;
1006 buf->Name[12] = 0x96;
1007 buf->Name[13] = 0x8B;
1008 buf->Name[14] = 0xCD;
1009 buf->Name[15] = 0x7C;
1010 buf->Mode = cpu_to_le32(mode);
1011 cifs_dbg(FYI, "mode on posix create 0%o\n", mode);
1012 return buf;
1013 }
1014
1015 static int
add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
1017 {
1018 unsigned int num = *num_iovec;
1019
1020 iov[num].iov_base = create_posix_buf(mode);
1021 if (mode == ACL_NO_MODE)
1022 cifs_dbg(FYI, "%s: no mode\n", __func__);
1023 if (iov[num].iov_base == NULL)
1024 return -ENOMEM;
1025 iov[num].iov_len = sizeof(struct create_posix);
1026 *num_iovec = num + 1;
1027 return 0;
1028 }
1029
1030
1031 /*
1032 *
1033 * SMB2 Worker functions follow:
1034 *
1035 * The general structure of the worker functions is:
1036 * 1) Call smb2_init (assembles SMB2 header)
1037 * 2) Initialize SMB2 command specific fields in fixed length area of SMB
1038 * 3) Call smb_sendrcv2 (sends request on socket and waits for response)
1039 * 4) Decode SMB2 command specific fields in the fixed length area
1040 * 5) Decode variable length data area (if any for this SMB2 command type)
1041 * 6) Call free smb buffer
1042 * 7) return
1043 *
1044 */
1045
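/*
 * Send the SMB2/SMB3 NEGOTIATE request (multiple dialects for the default
 * and "vers=3" mounts, a single dialect otherwise), validate that the
 * dialect the server picked was one we asked for, then cache the negotiated
 * sizes, security mode and capabilities, and decode any SMB3.1.1 negotiate
 * contexts in the response.
 */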
1046 int
SMB2_negotiate(const unsigned int xid,
1048 struct cifs_ses *ses,
1049 struct TCP_Server_Info *server)
1050 {
1051 struct smb_rqst rqst;
1052 struct smb2_negotiate_req *req;
1053 struct smb2_negotiate_rsp *rsp;
1054 struct kvec iov[1];
1055 struct kvec rsp_iov;
1056 int rc;
1057 int resp_buftype;
1058 int blob_offset, blob_length;
1059 char *security_blob;
1060 int flags = CIFS_NEG_OP;
1061 unsigned int total_len;
1062
1063 cifs_dbg(FYI, "Negotiate protocol\n");
1064
1065 if (!server) {
1066 WARN(1, "%s: server is NULL!\n", __func__);
1067 return -EIO;
1068 }
1069
1070 rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server,
1071 (void **) &req, &total_len);
1072 if (rc)
1073 return rc;
1074
1075 req->hdr.SessionId = 0;
1076
1077 memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
1078 memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
1079
1080 if (strcmp(server->vals->version_string,
1081 SMB3ANY_VERSION_STRING) == 0) {
1082 req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
1083 req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
1084 req->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
1085 req->DialectCount = cpu_to_le16(3);
1086 total_len += 6;
1087 } else if (strcmp(server->vals->version_string,
1088 SMBDEFAULT_VERSION_STRING) == 0) {
1089 req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
1090 req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
1091 req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
1092 req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
1093 req->DialectCount = cpu_to_le16(4);
1094 total_len += 8;
1095 } else {
1096 /* otherwise send specific dialect */
1097 req->Dialects[0] = cpu_to_le16(server->vals->protocol_id);
1098 req->DialectCount = cpu_to_le16(1);
1099 total_len += 2;
1100 }
1101
1102 /* only one of SMB2 signing flags may be set in SMB2 request */
1103 if (ses->sign)
1104 req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
1105 else if (global_secflags & CIFSSEC_MAY_SIGN)
1106 req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
1107 else
1108 req->SecurityMode = 0;
1109
1110 req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
1111 if (ses->chan_max > 1)
1112 req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
1113
1114 /* ClientGUID must be zero for SMB2.02 dialect */
1115 if (server->vals->protocol_id == SMB20_PROT_ID)
1116 memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
1117 else {
1118 memcpy(req->ClientGUID, server->client_guid,
1119 SMB2_CLIENT_GUID_SIZE);
1120 if ((server->vals->protocol_id == SMB311_PROT_ID) ||
1121 (strcmp(server->vals->version_string,
1122 SMB3ANY_VERSION_STRING) == 0) ||
1123 (strcmp(server->vals->version_string,
1124 SMBDEFAULT_VERSION_STRING) == 0))
1125 assemble_neg_contexts(req, server, &total_len);
1126 }
1127 iov[0].iov_base = (char *)req;
1128 iov[0].iov_len = total_len;
1129
1130 memset(&rqst, 0, sizeof(struct smb_rqst));
1131 rqst.rq_iov = iov;
1132 rqst.rq_nvec = 1;
1133
1134 rc = cifs_send_recv(xid, ses, server,
1135 &rqst, &resp_buftype, flags, &rsp_iov);
1136 cifs_small_buf_release(req);
1137 rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
1138 /*
1139 * No tcon so can't do
1140 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
1141 */
1142 if (rc == -EOPNOTSUPP) {
1143 cifs_server_dbg(VFS, "Dialect not supported by server. Consider specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n");
1144 goto neg_exit;
1145 } else if (rc != 0)
1146 goto neg_exit;
1147
1148 rc = -EIO;
1149 if (strcmp(server->vals->version_string,
1150 SMB3ANY_VERSION_STRING) == 0) {
1151 if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
1152 cifs_server_dbg(VFS,
1153 "SMB2 dialect returned but not requested\n");
1154 goto neg_exit;
1155 } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
1156 cifs_server_dbg(VFS,
1157 "SMB2.1 dialect returned but not requested\n");
1158 goto neg_exit;
1159 } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
/* ops default to SMB3.0, so update for the negotiated dialect */
1161 server->ops = &smb311_operations;
1162 server->vals = &smb311_values;
1163 }
1164 } else if (strcmp(server->vals->version_string,
1165 SMBDEFAULT_VERSION_STRING) == 0) {
1166 if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
1167 cifs_server_dbg(VFS,
1168 "SMB2 dialect returned but not requested\n");
1169 goto neg_exit;
1170 } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
/* ops default to SMB3.0, so update for the negotiated dialect */
1172 server->ops = &smb21_operations;
1173 server->vals = &smb21_values;
1174 } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
1175 server->ops = &smb311_operations;
1176 server->vals = &smb311_values;
1177 }
1178 } else if (le16_to_cpu(rsp->DialectRevision) !=
1179 server->vals->protocol_id) {
1180 /* if requested single dialect ensure returned dialect matched */
1181 cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
1182 le16_to_cpu(rsp->DialectRevision));
1183 goto neg_exit;
1184 }
1185
1186 cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
1187
1188 if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
1189 cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
1190 else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
1191 cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
1192 else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
1193 cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
1194 else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
1195 cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
1196 else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
1197 cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
1198 else {
1199 cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
1200 le16_to_cpu(rsp->DialectRevision));
1201 goto neg_exit;
1202 }
1203
1204 rc = 0;
1205 server->dialect = le16_to_cpu(rsp->DialectRevision);
1206
1207 /*
1208 * Keep a copy of the hash after negprot. This hash will be
1209 * the starting hash value for all sessions made from this
1210 * server.
1211 */
1212 memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
1213 SMB2_PREAUTH_HASH_SIZE);
1214
1215 /* SMB2 only has an extended negflavor */
1216 server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
1217 /* set it to the maximum buffer size value we can send with 1 credit */
1218 server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
1219 SMB2_MAX_BUFFER_SIZE);
1220 server->max_read = le32_to_cpu(rsp->MaxReadSize);
1221 server->max_write = le32_to_cpu(rsp->MaxWriteSize);
1222 server->sec_mode = le16_to_cpu(rsp->SecurityMode);
1223 if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode)
1224 cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n",
1225 server->sec_mode);
1226 server->capabilities = le32_to_cpu(rsp->Capabilities);
1227 /* Internal types */
1228 server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
1229
/*
 * SMB3.0 supports only one cipher and doesn't have an encryption
 * negotiate context. Set the cipher type manually.
 */
1234 if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
1235 server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;
1236
1237 security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
1238 (struct smb2_hdr *)rsp);
/*
 * See MS-SMB2 section 2.2.4: if there is no blob, the client picks the
 * default, which for us will be
 *	ses->sectype = RawNTLMSSP;
 * but for the time being this is our only auth choice so it doesn't matter.
 * We just found a server which sets the blob length to zero, expecting raw.
 */
1246 if (blob_length == 0) {
1247 cifs_dbg(FYI, "missing security blob on negprot\n");
1248 server->sec_ntlmssp = true;
1249 }
1250
1251 rc = cifs_enable_signing(server, ses->sign);
1252 if (rc)
1253 goto neg_exit;
1254 if (blob_length) {
1255 rc = decode_negTokenInit(security_blob, blob_length, server);
1256 if (rc == 1)
1257 rc = 0;
1258 else if (rc == 0)
1259 rc = -EIO;
1260 }
1261
1262 if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
1263 if (rsp->NegotiateContextCount)
1264 rc = smb311_decode_neg_context(rsp, server,
1265 rsp_iov.iov_len);
1266 else
1267 cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
1268 }
1269
1270 if (server->cipher_type && !rc) {
1271 if (!SERVER_IS_CHAN(server)) {
1272 rc = smb3_crypto_aead_allocate(server);
1273 } else {
1274 /* For channels, just reuse the primary server crypto secmech. */
1275 server->secmech.enc = server->primary_server->secmech.enc;
1276 server->secmech.dec = server->primary_server->secmech.dec;
1277 }
1278 }
1279 neg_exit:
1280 free_rsp_buf(resp_buftype, rsp);
1281 return rc;
1282 }
1283
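/*
 * FSCTL_VALIDATE_NEGOTIATE_INFO: resend our negotiate parameters over a
 * signed IOCTL and check that the dialect, security mode and capabilities
 * the server reports match what was originally negotiated (not needed for
 * SMB3.1.1, where preauth integrity supersedes it).
 */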
int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
1285 {
1286 int rc;
1287 struct validate_negotiate_info_req *pneg_inbuf;
1288 struct validate_negotiate_info_rsp *pneg_rsp = NULL;
1289 u32 rsplen;
1290 u32 inbuflen; /* max of 4 dialects */
1291 struct TCP_Server_Info *server = tcon->ses->server;
1292
1293 cifs_dbg(FYI, "validate negotiate\n");
1294
1295 /* In SMB3.11 preauth integrity supersedes validate negotiate */
1296 if (server->dialect == SMB311_PROT_ID)
1297 return 0;
1298
/*
 * The validation ioctl must be signed, so there is no point sending it if
 * we cannot sign it (i.e. we are not a known user). Even if signing is not
 * required (enabled but not negotiated), in that case we selectively
 * sign just this, the first and only signed request on a connection.
 * Having validation of the negotiate info helps reduce attack vectors.
 */
1306 if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
1307 return 0; /* validation requires signing */
1308
1309 if (tcon->ses->user_name == NULL) {
1310 cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
1311 return 0; /* validation requires signing */
1312 }
1313
1314 if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
1315 cifs_tcon_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
1316
1317 pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
1318 if (!pneg_inbuf)
1319 return -ENOMEM;
1320
1321 pneg_inbuf->Capabilities =
1322 cpu_to_le32(server->vals->req_capabilities);
1323 if (tcon->ses->chan_max > 1)
1324 pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
1325
1326 memcpy(pneg_inbuf->Guid, server->client_guid,
1327 SMB2_CLIENT_GUID_SIZE);
1328
1329 if (tcon->ses->sign)
1330 pneg_inbuf->SecurityMode =
1331 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
1332 else if (global_secflags & CIFSSEC_MAY_SIGN)
1333 pneg_inbuf->SecurityMode =
1334 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
1335 else
1336 pneg_inbuf->SecurityMode = 0;
1337
1338
1339 if (strcmp(server->vals->version_string,
1340 SMB3ANY_VERSION_STRING) == 0) {
1341 pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
1342 pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
1343 pneg_inbuf->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
1344 pneg_inbuf->DialectCount = cpu_to_le16(3);
1345 /* SMB 2.1 not included so subtract one dialect from len */
1346 inbuflen = sizeof(*pneg_inbuf) -
1347 (sizeof(pneg_inbuf->Dialects[0]));
1348 } else if (strcmp(server->vals->version_string,
1349 SMBDEFAULT_VERSION_STRING) == 0) {
1350 pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
1351 pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
1352 pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
1353 pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
1354 pneg_inbuf->DialectCount = cpu_to_le16(4);
1355 /* structure is big enough for 4 dialects */
1356 inbuflen = sizeof(*pneg_inbuf);
1357 } else {
1358 /* otherwise specific dialect was requested */
1359 pneg_inbuf->Dialects[0] =
1360 cpu_to_le16(server->vals->protocol_id);
1361 pneg_inbuf->DialectCount = cpu_to_le16(1);
1362 /* structure is big enough for 4 dialects, sending only 1 */
1363 inbuflen = sizeof(*pneg_inbuf) -
1364 sizeof(pneg_inbuf->Dialects[0]) * 3;
1365 }
1366
1367 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
1368 FSCTL_VALIDATE_NEGOTIATE_INFO,
1369 (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
1370 (char **)&pneg_rsp, &rsplen);
1371 if (rc == -EOPNOTSUPP) {
/*
 * Old Windows versions or a NetApp SMB server can return a
 * "not supported" error. The client should accept it.
 */
1376 cifs_tcon_dbg(VFS, "Server does not support validate negotiate\n");
1377 rc = 0;
1378 goto out_free_inbuf;
1379 } else if (rc != 0) {
1380 cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n",
1381 rc);
1382 rc = -EIO;
1383 goto out_free_inbuf;
1384 }
1385
1386 rc = -EIO;
1387 if (rsplen != sizeof(*pneg_rsp)) {
1388 cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n",
1389 rsplen);
1390
1391 /* relax check since Mac returns max bufsize allowed on ioctl */
1392 if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
1393 goto out_free_rsp;
1394 }
1395
1396 /* check validate negotiate info response matches what we got earlier */
1397 if (pneg_rsp->Dialect != cpu_to_le16(server->dialect))
1398 goto vneg_out;
1399
1400 if (pneg_rsp->SecurityMode != cpu_to_le16(server->sec_mode))
1401 goto vneg_out;
1402
1403 /* do not validate server guid because not saved at negprot time yet */
1404
1405 if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
1406 SMB2_LARGE_FILES) != server->capabilities)
1407 goto vneg_out;
1408
1409 /* validate negotiate successful */
1410 rc = 0;
1411 cifs_dbg(FYI, "validate negotiate info successful\n");
1412 goto out_free_rsp;
1413
1414 vneg_out:
1415 cifs_tcon_dbg(VFS, "protocol revalidation - security settings mismatch\n");
1416 out_free_rsp:
1417 kfree(pneg_rsp);
1418 out_free_inbuf:
1419 kfree(pneg_inbuf);
1420 return rc;
1421 }
1422
1423 enum securityEnum
smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
1425 {
1426 switch (requested) {
1427 case Kerberos:
1428 case RawNTLMSSP:
1429 return requested;
1430 case NTLMv2:
1431 return RawNTLMSSP;
1432 case Unspecified:
1433 if (server->sec_ntlmssp &&
1434 (global_secflags & CIFSSEC_MAY_NTLMSSP))
1435 return RawNTLMSSP;
1436 if ((server->sec_kerberos || server->sec_mskerberos) &&
1437 (global_secflags & CIFSSEC_MAY_KRB5))
1438 return Kerberos;
1439 fallthrough;
1440 default:
1441 return Unspecified;
1442 }
1443 }
1444
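/*
 * Session setup is driven as a small state machine: SMB2_sess_data carries
 * the request/response iovecs and the result between steps, and
 * sess_data->func points at the next step (Kerberos, or one of the raw
 * NTLMSSP phases).
 */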
1445 struct SMB2_sess_data {
1446 unsigned int xid;
1447 struct cifs_ses *ses;
1448 struct TCP_Server_Info *server;
1449 struct nls_table *nls_cp;
1450 void (*func)(struct SMB2_sess_data *);
1451 int result;
1452 u64 previous_session;
1453
/* we will send the SMB in three pieces:
 * a fixed-length beginning part, an optional
 * SPNEGO blob (which can be zero length), and a
 * last part which will include the strings
 * and the rest of the bcc area. This allows us to avoid
 * a large 17K buffer allocation.
 */
1461 int buf0_type;
1462 struct kvec iov[2];
1463 };
1464
1465 static int
SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
1467 {
1468 int rc;
1469 struct cifs_ses *ses = sess_data->ses;
1470 struct TCP_Server_Info *server = sess_data->server;
1471 struct smb2_sess_setup_req *req;
1472 unsigned int total_len;
1473 bool is_binding = false;
1474
1475 rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
1476 (void **) &req,
1477 &total_len);
1478 if (rc)
1479 return rc;
1480
1481 spin_lock(&ses->ses_lock);
1482 is_binding = (ses->ses_status == SES_GOOD);
1483 spin_unlock(&ses->ses_lock);
1484
1485 if (is_binding) {
1486 req->hdr.SessionId = cpu_to_le64(ses->Suid);
1487 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
1488 req->PreviousSessionId = 0;
1489 req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
1490 cifs_dbg(FYI, "Binding to sess id: %llx\n", ses->Suid);
1491 } else {
1492 /* First session, not a reauthenticate */
1493 req->hdr.SessionId = 0;
/*
 * if reconnecting, we need to send the previous session id;
 * otherwise it is 0
 */
1498 req->PreviousSessionId = cpu_to_le64(sess_data->previous_session);
1499 req->Flags = 0; /* MBZ */
1500 cifs_dbg(FYI, "Fresh session. Previous: %llx\n",
1501 sess_data->previous_session);
1502 }
1503
1504 /* enough to enable echos and oplocks and one max size write */
1505 if (server->credits >= server->max_credits)
1506 req->hdr.CreditRequest = cpu_to_le16(0);
1507 else
1508 req->hdr.CreditRequest = cpu_to_le16(
1509 min_t(int, server->max_credits -
1510 server->credits, 130));
1511
1512 /* only one of SMB2 signing flags may be set in SMB2 request */
1513 if (server->sign)
1514 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
1515 else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
1516 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
1517 else
1518 req->SecurityMode = 0;
1519
1520 #ifdef CONFIG_CIFS_DFS_UPCALL
1521 req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
1522 #else
1523 req->Capabilities = 0;
1524 #endif /* DFS_UPCALL */
1525
1526 req->Channel = 0; /* MBZ */
1527
1528 sess_data->iov[0].iov_base = (char *)req;
1529 /* 1 for pad */
1530 sess_data->iov[0].iov_len = total_len - 1;
1531 /*
1532 * This variable will be used to clear the buffer
1533 * allocated above in case of any error in the calling function.
1534 */
1535 sess_data->buf0_type = CIFS_SMALL_BUFFER;
1536
1537 return 0;
1538 }
1539
1540 static void
SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
1542 {
1543 struct kvec *iov = sess_data->iov;
1544
1545 /* iov[1] is already freed by caller */
1546 if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base)
1547 memzero_explicit(iov[0].iov_base, iov[0].iov_len);
1548
1549 free_rsp_buf(sess_data->buf0_type, iov[0].iov_base);
1550 sess_data->buf0_type = CIFS_NO_BUFFER;
1551 }
1552
1553 static int
SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
1555 {
1556 int rc;
1557 struct smb_rqst rqst;
1558 struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
1559 struct kvec rsp_iov = { NULL, 0 };
1560
1561 /* Testing shows that buffer offset must be at location of Buffer[0] */
1562 req->SecurityBufferOffset =
1563 cpu_to_le16(sizeof(struct smb2_sess_setup_req));
1564 req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
1565
1566 memset(&rqst, 0, sizeof(struct smb_rqst));
1567 rqst.rq_iov = sess_data->iov;
1568 rqst.rq_nvec = 2;
1569
1570 /* BB add code to build os and lm fields */
1571 rc = cifs_send_recv(sess_data->xid, sess_data->ses,
1572 sess_data->server,
1573 &rqst,
1574 &sess_data->buf0_type,
1575 CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
1576 cifs_small_buf_release(sess_data->iov[0].iov_base);
1577 if (rc == 0)
1578 sess_data->ses->expired_pwd = false;
1579 else if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
1580 if (sess_data->ses->expired_pwd == false)
1581 trace_smb3_key_expired(sess_data->server->hostname,
1582 sess_data->ses->user_name,
1583 sess_data->server->conn_id,
1584 &sess_data->server->dstaddr, rc);
1585 sess_data->ses->expired_pwd = true;
1586 }
1587
1588 memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
1589
1590 return rc;
1591 }
1592
1593 static int
SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
1595 {
1596 int rc = 0;
1597 struct cifs_ses *ses = sess_data->ses;
1598 struct TCP_Server_Info *server = sess_data->server;
1599
1600 cifs_server_lock(server);
1601 if (server->ops->generate_signingkey) {
1602 rc = server->ops->generate_signingkey(ses, server);
1603 if (rc) {
1604 cifs_dbg(FYI,
1605 "SMB3 session key generation failed\n");
1606 cifs_server_unlock(server);
1607 return rc;
1608 }
1609 }
1610 if (!server->session_estab) {
1611 server->sequence_number = 0x2;
1612 server->session_estab = true;
1613 }
1614 cifs_server_unlock(server);
1615
1616 cifs_dbg(FYI, "SMB2/3 session established successfully\n");
1617 return rc;
1618 }
1619
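/*
 * Kerberos path: obtain the SPNEGO blob for the target from cifs.upcall via
 * the keyring, keep the session key it returned (unless this is a channel
 * binding), and send the blob in the SESSION_SETUP request. Without
 * CONFIG_CIFS_UPCALL we can only fail the request.
 */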
1620 #ifdef CONFIG_CIFS_UPCALL
1621 static void
SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
1623 {
1624 int rc;
1625 struct cifs_ses *ses = sess_data->ses;
1626 struct TCP_Server_Info *server = sess_data->server;
1627 struct cifs_spnego_msg *msg;
1628 struct key *spnego_key = NULL;
1629 struct smb2_sess_setup_rsp *rsp = NULL;
1630 bool is_binding = false;
1631
1632 rc = SMB2_sess_alloc_buffer(sess_data);
1633 if (rc)
1634 goto out;
1635
1636 spnego_key = cifs_get_spnego_key(ses, server);
1637 if (IS_ERR(spnego_key)) {
1638 rc = PTR_ERR(spnego_key);
1639 if (rc == -ENOKEY)
1640 cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
1641 spnego_key = NULL;
1642 goto out;
1643 }
1644
1645 msg = spnego_key->payload.data[0];
1646 /*
1647 * check version field to make sure that cifs.upcall is
1648 * sending us a response in an expected form
1649 */
1650 if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
1651 cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n",
1652 CIFS_SPNEGO_UPCALL_VERSION, msg->version);
1653 rc = -EKEYREJECTED;
1654 goto out_put_spnego_key;
1655 }
1656
1657 spin_lock(&ses->ses_lock);
1658 is_binding = (ses->ses_status == SES_GOOD);
1659 spin_unlock(&ses->ses_lock);
1660
1661 /* keep session key if binding */
1662 if (!is_binding) {
1663 kfree_sensitive(ses->auth_key.response);
1664 ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
1665 GFP_KERNEL);
1666 if (!ses->auth_key.response) {
1667 cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
1668 msg->sesskey_len);
1669 rc = -ENOMEM;
1670 goto out_put_spnego_key;
1671 }
1672 ses->auth_key.len = msg->sesskey_len;
1673 }
1674
1675 sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
1676 sess_data->iov[1].iov_len = msg->secblob_len;
1677
1678 rc = SMB2_sess_sendreceive(sess_data);
1679 if (rc)
1680 goto out_put_spnego_key;
1681
1682 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1683 /* keep session id and flags if binding */
1684 if (!is_binding) {
1685 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
1686 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1687 }
1688
1689 rc = SMB2_sess_establish_session(sess_data);
1690 out_put_spnego_key:
1691 key_invalidate(spnego_key);
1692 key_put(spnego_key);
1693 if (rc) {
1694 kfree_sensitive(ses->auth_key.response);
1695 ses->auth_key.response = NULL;
1696 ses->auth_key.len = 0;
1697 }
1698 out:
1699 sess_data->result = rc;
1700 sess_data->func = NULL;
1701 SMB2_sess_free_buffer(sess_data);
1702 }
1703 #else
1704 static void
1705 SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
1706 {
1707 cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
1708 sess_data->result = -EOPNOTSUPP;
1709 sess_data->func = NULL;
1710 }
1711 #endif
1712
1713 static void
1714 SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);
1715
1716 static void
1717 SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
1718 {
1719 int rc;
1720 struct cifs_ses *ses = sess_data->ses;
1721 struct TCP_Server_Info *server = sess_data->server;
1722 struct smb2_sess_setup_rsp *rsp = NULL;
1723 unsigned char *ntlmssp_blob = NULL;
1724 bool use_spnego = false; /* else use raw ntlmssp */
1725 u16 blob_length = 0;
1726 bool is_binding = false;
1727
1728 /*
1729 * If memory allocation is successful, caller of this function
1730 * frees it.
1731 */
1732 ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
1733 if (!ses->ntlmssp) {
1734 rc = -ENOMEM;
1735 goto out_err;
1736 }
1737 ses->ntlmssp->sesskey_per_smbsess = true;
1738
1739 rc = SMB2_sess_alloc_buffer(sess_data);
1740 if (rc)
1741 goto out_err;
1742
1743 rc = build_ntlmssp_smb3_negotiate_blob(&ntlmssp_blob,
1744 &blob_length, ses, server,
1745 sess_data->nls_cp);
1746 if (rc)
1747 goto out;
1748
1749 if (use_spnego) {
1750 /* BB eventually need to add this */
1751 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
1752 rc = -EOPNOTSUPP;
1753 goto out;
1754 }
1755 sess_data->iov[1].iov_base = ntlmssp_blob;
1756 sess_data->iov[1].iov_len = blob_length;
1757
1758 rc = SMB2_sess_sendreceive(sess_data);
1759 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1760
1761 /* If true, rc here is expected and not an error */
1762 if (sess_data->buf0_type != CIFS_NO_BUFFER &&
1763 rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
1764 rc = 0;
1765
1766 if (rc)
1767 goto out;
1768
1769 if (offsetof(struct smb2_sess_setup_rsp, Buffer) !=
1770 le16_to_cpu(rsp->SecurityBufferOffset)) {
1771 cifs_dbg(VFS, "Invalid security buffer offset %d\n",
1772 le16_to_cpu(rsp->SecurityBufferOffset));
1773 rc = -EIO;
1774 goto out;
1775 }
1776 rc = decode_ntlmssp_challenge(rsp->Buffer,
1777 le16_to_cpu(rsp->SecurityBufferLength), ses);
1778 if (rc)
1779 goto out;
1780
1781 cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
1782
1783 spin_lock(&ses->ses_lock);
1784 is_binding = (ses->ses_status == SES_GOOD);
1785 spin_unlock(&ses->ses_lock);
1786
1787 /* keep existing ses id and flags if binding */
1788 if (!is_binding) {
1789 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
1790 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1791 }
1792
1793 out:
1794 kfree_sensitive(ntlmssp_blob);
1795 SMB2_sess_free_buffer(sess_data);
1796 if (!rc) {
1797 sess_data->result = 0;
1798 sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
1799 return;
1800 }
1801 out_err:
1802 kfree_sensitive(ses->ntlmssp);
1803 ses->ntlmssp = NULL;
1804 sess_data->result = rc;
1805 sess_data->func = NULL;
1806 }
1807
1808 static void
1809 SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
1810 {
1811 int rc;
1812 struct cifs_ses *ses = sess_data->ses;
1813 struct TCP_Server_Info *server = sess_data->server;
1814 struct smb2_sess_setup_req *req;
1815 struct smb2_sess_setup_rsp *rsp = NULL;
1816 unsigned char *ntlmssp_blob = NULL;
1817 bool use_spnego = false; /* else use raw ntlmssp */
1818 u16 blob_length = 0;
1819 bool is_binding = false;
1820
1821 rc = SMB2_sess_alloc_buffer(sess_data);
1822 if (rc)
1823 goto out;
1824
1825 req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
1826 req->hdr.SessionId = cpu_to_le64(ses->Suid);
1827
1828 rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length,
1829 ses, server,
1830 sess_data->nls_cp);
1831 if (rc) {
1832 cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
1833 goto out;
1834 }
1835
1836 if (use_spnego) {
1837 /* BB eventually need to add this */
1838 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
1839 rc = -EOPNOTSUPP;
1840 goto out;
1841 }
1842 sess_data->iov[1].iov_base = ntlmssp_blob;
1843 sess_data->iov[1].iov_len = blob_length;
1844
1845 rc = SMB2_sess_sendreceive(sess_data);
1846 if (rc)
1847 goto out;
1848
1849 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1850
1851 spin_lock(&ses->ses_lock);
1852 is_binding = (ses->ses_status == SES_GOOD);
1853 spin_unlock(&ses->ses_lock);
1854
1855 /* keep existing ses id and flags if binding */
1856 if (!is_binding) {
1857 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
1858 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1859 }
1860
1861 rc = SMB2_sess_establish_session(sess_data);
1862 #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
1863 if (ses->server->dialect < SMB30_PROT_ID) {
1864 cifs_dbg(VFS, "%s: dumping generated SMB2 session keys\n", __func__);
1865 /*
1866 * The session id is opaque in terms of endianness, so we can't
1867 * print it as a long long, so we dump it as we got it on the wire
1868 */
1869 cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid),
1870 &ses->Suid);
1871 cifs_dbg(VFS, "Session Key %*ph\n",
1872 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
1873 cifs_dbg(VFS, "Signing Key %*ph\n",
1874 SMB3_SIGN_KEY_SIZE, ses->auth_key.response);
1875 }
1876 #endif
1877 out:
1878 kfree_sensitive(ntlmssp_blob);
1879 SMB2_sess_free_buffer(sess_data);
1880 kfree_sensitive(ses->ntlmssp);
1881 ses->ntlmssp = NULL;
1882 sess_data->result = rc;
1883 sess_data->func = NULL;
1884 }
1885
1886 static int
1887 SMB2_select_sec(struct SMB2_sess_data *sess_data)
1888 {
1889 int type;
1890 struct cifs_ses *ses = sess_data->ses;
1891 struct TCP_Server_Info *server = sess_data->server;
1892
1893 type = smb2_select_sectype(server, ses->sectype);
1894 cifs_dbg(FYI, "sess setup type %d\n", type);
1895 if (type == Unspecified) {
1896 cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
1897 return -EINVAL;
1898 }
1899
1900 switch (type) {
1901 case Kerberos:
1902 sess_data->func = SMB2_auth_kerberos;
1903 break;
1904 case RawNTLMSSP:
1905 sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate;
1906 break;
1907 default:
1908 cifs_dbg(VFS, "secType %d not supported!\n", type);
1909 return -EOPNOTSUPP;
1910 }
1911
1912 return 0;
1913 }
1914
1915 int
1916 SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
1917 struct TCP_Server_Info *server,
1918 const struct nls_table *nls_cp)
1919 {
1920 int rc = 0;
1921 struct SMB2_sess_data *sess_data;
1922
1923 cifs_dbg(FYI, "Session Setup\n");
1924
1925 if (!server) {
1926 WARN(1, "%s: server is NULL!\n", __func__);
1927 return -EIO;
1928 }
1929
1930 sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL);
1931 if (!sess_data)
1932 return -ENOMEM;
1933
1934 sess_data->xid = xid;
1935 sess_data->ses = ses;
1936 sess_data->server = server;
1937 sess_data->buf0_type = CIFS_NO_BUFFER;
1938 sess_data->nls_cp = (struct nls_table *) nls_cp;
1939 sess_data->previous_session = ses->Suid;
1940
1941 rc = SMB2_select_sec(sess_data);
1942 if (rc)
1943 goto out;
1944
1945 /*
1946 * Initialize the session hash with the server one.
1947 */
1948 memcpy(ses->preauth_sha_hash, server->preauth_sha_hash,
1949 SMB2_PREAUTH_HASH_SIZE);
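	/*
	 * For SMB 3.1.1 this preauth integrity hash is then updated with
	 * each session setup request/response exchanged on this session.
	 */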
1950
1951 while (sess_data->func)
1952 sess_data->func(sess_data);
1953
1954 if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
1955 cifs_server_dbg(VFS, "signing requested but authenticated as guest\n");
1956 rc = sess_data->result;
1957 out:
1958 kfree_sensitive(sess_data);
1959 return rc;
1960 }
1961
1962 int
1963 SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
1964 {
1965 struct smb_rqst rqst;
1966 struct smb2_logoff_req *req; /* response is also trivial struct */
1967 int rc = 0;
1968 struct TCP_Server_Info *server;
1969 int flags = 0;
1970 unsigned int total_len;
1971 struct kvec iov[1];
1972 struct kvec rsp_iov;
1973 int resp_buf_type;
1974
1975 cifs_dbg(FYI, "disconnect session %p\n", ses);
1976
1977 if (ses && (ses->server))
1978 server = ses->server;
1979 else
1980 return -EIO;
1981
1982 /* no need to send SMB logoff if uid already closed due to reconnect */
1983 spin_lock(&ses->chan_lock);
1984 if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
1985 spin_unlock(&ses->chan_lock);
1986 goto smb2_session_already_dead;
1987 }
1988 spin_unlock(&ses->chan_lock);
1989
1990 rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server,
1991 (void **) &req, &total_len);
1992 if (rc)
1993 return rc;
1994
1995 /* since no tcon, smb2_init can not do this, so do here */
1996 req->hdr.SessionId = cpu_to_le64(ses->Suid);
1997
1998 if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
1999 flags |= CIFS_TRANSFORM_REQ;
2000 else if (server->sign)
2001 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
2002
2003 flags |= CIFS_NO_RSP_BUF;
2004
2005 iov[0].iov_base = (char *)req;
2006 iov[0].iov_len = total_len;
2007
2008 memset(&rqst, 0, sizeof(struct smb_rqst));
2009 rqst.rq_iov = iov;
2010 rqst.rq_nvec = 1;
2011
2012 rc = cifs_send_recv(xid, ses, ses->server,
2013 &rqst, &resp_buf_type, flags, &rsp_iov);
2014 cifs_small_buf_release(req);
2015 /*
2016 * No tcon so can't do
2017 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
2018 */
2019
2020 smb2_session_already_dead:
2021 return rc;
2022 }
2023
2024 static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
2025 {
2026 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
2027 }
2028
2029 #define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
2030
2031 /* These are similar values to what Windows uses */
2032 static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
2033 {
2034 tcon->max_chunks = 256;
2035 tcon->max_bytes_chunk = 1048576;
2036 tcon->max_bytes_copy = 16777216;
2037 }
2038
2039 int
2040 SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
2041 struct cifs_tcon *tcon, const struct nls_table *cp)
2042 {
2043 struct smb_rqst rqst;
2044 struct smb2_tree_connect_req *req;
2045 struct smb2_tree_connect_rsp *rsp = NULL;
2046 struct kvec iov[2];
2047 struct kvec rsp_iov = { NULL, 0 };
2048 int rc = 0;
2049 int resp_buftype;
2050 int unc_path_len;
2051 __le16 *unc_path = NULL;
2052 int flags = 0;
2053 unsigned int total_len;
2054 struct TCP_Server_Info *server = cifs_pick_channel(ses);
2055
2056 cifs_dbg(FYI, "TCON\n");
2057
2058 if (!server || !tree)
2059 return -EIO;
2060
2061 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
2062 if (unc_path == NULL)
2063 return -ENOMEM;
2064
2065 unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp);
2066 if (unc_path_len <= 0) {
2067 kfree(unc_path);
2068 return -EINVAL;
2069 }
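	/* cifs_strtoUTF16() returned the length in 16-bit units; convert it to bytes */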
2070 unc_path_len *= 2;
2071
2072 /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
2073 tcon->tid = 0;
2074 atomic_set(&tcon->num_remote_opens, 0);
2075 rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server,
2076 (void **) &req, &total_len);
2077 if (rc) {
2078 kfree(unc_path);
2079 return rc;
2080 }
2081
2082 if (smb3_encryption_required(tcon))
2083 flags |= CIFS_TRANSFORM_REQ;
2084
2085 iov[0].iov_base = (char *)req;
2086 /* 1 for pad */
2087 iov[0].iov_len = total_len - 1;
2088
2089 /* Testing shows that buffer offset must be at location of Buffer[0] */
2090 req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req));
2091 req->PathLength = cpu_to_le16(unc_path_len);
2092 iov[1].iov_base = unc_path;
2093 iov[1].iov_len = unc_path_len;
2094
2095 /*
2096 * An SMB 3.1.1 tree connect request must be signed if not encrypted (see
2097 * MS-SMB2 3.2.4.1.1), unless it is a guest or anonymous user (see MS-SMB2
2098 * 3.2.5.3.1). Samba servers don't always set the flag, so also check for a null user.
2099 */
2100 if ((server->dialect == SMB311_PROT_ID) &&
2101 !smb3_encryption_required(tcon) &&
2102 !(ses->session_flags &
2103 (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
2104 ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
2105 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
2106
2107 memset(&rqst, 0, sizeof(struct smb_rqst));
2108 rqst.rq_iov = iov;
2109 rqst.rq_nvec = 2;
2110
2111 /* Need 64 credits for a max size write, so ask for more in case we don't have them yet */
2112 if (server->credits >= server->max_credits)
2113 req->hdr.CreditRequest = cpu_to_le16(0);
2114 else
2115 req->hdr.CreditRequest = cpu_to_le16(
2116 min_t(int, server->max_credits -
2117 server->credits, 64));
2118
2119 rc = cifs_send_recv(xid, ses, server,
2120 &rqst, &resp_buftype, flags, &rsp_iov);
2121 cifs_small_buf_release(req);
2122 rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
2123 trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
2124 if ((rc != 0) || (rsp == NULL)) {
2125 cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
2126 tcon->need_reconnect = true;
2127 goto tcon_error_exit;
2128 }
2129
2130 switch (rsp->ShareType) {
2131 case SMB2_SHARE_TYPE_DISK:
2132 cifs_dbg(FYI, "connection to disk share\n");
2133 break;
2134 case SMB2_SHARE_TYPE_PIPE:
2135 tcon->pipe = true;
2136 cifs_dbg(FYI, "connection to pipe share\n");
2137 break;
2138 case SMB2_SHARE_TYPE_PRINT:
2139 tcon->print = true;
2140 cifs_dbg(FYI, "connection to printer\n");
2141 break;
2142 default:
2143 cifs_server_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
2144 rc = -EOPNOTSUPP;
2145 goto tcon_error_exit;
2146 }
2147
2148 tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
2149 tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
2150 tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
2151 tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId);
2152 strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name));
2153
2154 if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
2155 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
2156 cifs_tcon_dbg(VFS, "DFS capability contradicts DFS flag\n");
2157
2158 if (tcon->seal &&
2159 !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
2160 cifs_tcon_dbg(VFS, "Encryption is requested but not supported\n");
2161
2162 init_copy_chunk_defaults(tcon);
2163 if (server->ops->validate_negotiate)
2164 rc = server->ops->validate_negotiate(xid, tcon);
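	/*
	 * A share flagged SMB2_SHAREFLAG_ISOLATED_TRANSPORT asks the client
	 * not to share its TCP connection with other shares, hence nosharesock.
	 */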
2165 if (rc == 0) /* See MS-SMB2 2.2.10 and 3.2.5.5 */
2166 if (tcon->share_flags & SMB2_SHAREFLAG_ISOLATED_TRANSPORT)
2167 server->nosharesock = true;
2168 tcon_exit:
2169
2170 free_rsp_buf(resp_buftype, rsp);
2171 kfree(unc_path);
2172 return rc;
2173
2174 tcon_error_exit:
2175 if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME)
2176 cifs_tcon_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
2177 goto tcon_exit;
2178 }
2179
2180 int
2181 SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
2182 {
2183 struct smb_rqst rqst;
2184 struct smb2_tree_disconnect_req *req; /* response is trivial */
2185 int rc = 0;
2186 struct cifs_ses *ses = tcon->ses;
2187 struct TCP_Server_Info *server = cifs_pick_channel(ses);
2188 int flags = 0;
2189 unsigned int total_len;
2190 struct kvec iov[1];
2191 struct kvec rsp_iov;
2192 int resp_buf_type;
2193
2194 cifs_dbg(FYI, "Tree Disconnect\n");
2195
2196 if (!ses || !(ses->server))
2197 return -EIO;
2198
2199 trace_smb3_tdis_enter(xid, tcon->tid, ses->Suid, tcon->tree_name);
2200 spin_lock(&ses->chan_lock);
2201 if ((tcon->need_reconnect) ||
2202 (CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses))) {
2203 spin_unlock(&ses->chan_lock);
2204 return 0;
2205 }
2206 spin_unlock(&ses->chan_lock);
2207
2208 invalidate_all_cached_dirs(tcon);
2209
2210 rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, server,
2211 (void **) &req,
2212 &total_len);
2213 if (rc)
2214 return rc;
2215
2216 if (smb3_encryption_required(tcon))
2217 flags |= CIFS_TRANSFORM_REQ;
2218
2219 flags |= CIFS_NO_RSP_BUF;
2220
2221 iov[0].iov_base = (char *)req;
2222 iov[0].iov_len = total_len;
2223
2224 memset(&rqst, 0, sizeof(struct smb_rqst));
2225 rqst.rq_iov = iov;
2226 rqst.rq_nvec = 1;
2227
2228 rc = cifs_send_recv(xid, ses, server,
2229 &rqst, &resp_buf_type, flags, &rsp_iov);
2230 cifs_small_buf_release(req);
2231 if (rc) {
2232 cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
2233 trace_smb3_tdis_err(xid, tcon->tid, ses->Suid, rc);
2234 }
2235 trace_smb3_tdis_done(xid, tcon->tid, ses->Suid);
2236
2237 return rc;
2238 }
2239
2240
2241 static struct create_durable *
2242 create_durable_buf(void)
2243 {
2244 struct create_durable *buf;
2245
2246 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
2247 if (!buf)
2248 return NULL;
2249
2250 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2251 (struct create_durable, Data));
2252 buf->ccontext.DataLength = cpu_to_le32(16);
2253 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2254 (struct create_durable, Name));
2255 buf->ccontext.NameLength = cpu_to_le16(4);
2256 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
2257 buf->Name[0] = 'D';
2258 buf->Name[1] = 'H';
2259 buf->Name[2] = 'n';
2260 buf->Name[3] = 'Q';
2261 return buf;
2262 }
2263
2264 static struct create_durable *
2265 create_reconnect_durable_buf(struct cifs_fid *fid)
2266 {
2267 struct create_durable *buf;
2268
2269 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
2270 if (!buf)
2271 return NULL;
2272
2273 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2274 (struct create_durable, Data));
2275 buf->ccontext.DataLength = cpu_to_le32(16);
2276 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2277 (struct create_durable, Name));
2278 buf->ccontext.NameLength = cpu_to_le16(4);
2279 buf->Data.Fid.PersistentFileId = fid->persistent_fid;
2280 buf->Data.Fid.VolatileFileId = fid->volatile_fid;
2281 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
2282 buf->Name[0] = 'D';
2283 buf->Name[1] = 'H';
2284 buf->Name[2] = 'n';
2285 buf->Name[3] = 'C';
2286 return buf;
2287 }
2288
2289 static void
2290 parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf)
2291 {
2292 struct create_disk_id_rsp *pdisk_id = (struct create_disk_id_rsp *)cc;
2293
2294 cifs_dbg(FYI, "parse query id context 0x%llx 0x%llx\n",
2295 pdisk_id->DiskFileId, pdisk_id->VolumeId);
2296 buf->IndexNumber = pdisk_id->DiskFileId;
2297 }
2298
2299 static void
2300 parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
2301 struct create_posix_rsp *posix)
2302 {
2303 int sid_len;
2304 u8 *beg = (u8 *)cc + le16_to_cpu(cc->DataOffset);
2305 u8 *end = beg + le32_to_cpu(cc->DataLength);
2306 u8 *sid;
2307
2308 memset(posix, 0, sizeof(*posix));
2309
2310 posix->nlink = le32_to_cpu(*(__le32 *)(beg + 0));
2311 posix->reparse_tag = le32_to_cpu(*(__le32 *)(beg + 4));
2312 posix->mode = le32_to_cpu(*(__le32 *)(beg + 8));
2313
2314 sid = beg + 12;
2315 sid_len = posix_info_sid_size(sid, end);
2316 if (sid_len < 0) {
2317 cifs_dbg(VFS, "bad owner sid in posix create response\n");
2318 return;
2319 }
2320 memcpy(&posix->owner, sid, sid_len);
2321
2322 sid = sid + sid_len;
2323 sid_len = posix_info_sid_size(sid, end);
2324 if (sid_len < 0) {
2325 cifs_dbg(VFS, "bad group sid in posix create response\n");
2326 return;
2327 }
2328 memcpy(&posix->group, sid, sid_len);
2329
2330 cifs_dbg(FYI, "nlink=%d mode=%o reparse_tag=%x\n",
2331 posix->nlink, posix->mode, posix->reparse_tag);
2332 }
2333
2334 int smb2_parse_contexts(struct TCP_Server_Info *server,
2335 struct kvec *rsp_iov,
2336 unsigned int *epoch,
2337 char *lease_key, __u8 *oplock,
2338 struct smb2_file_all_info *buf,
2339 struct create_posix_rsp *posix)
2340 {
2341 struct smb2_create_rsp *rsp = rsp_iov->iov_base;
2342 struct create_context *cc;
2343 size_t rem, off, len;
2344 size_t doff, dlen;
2345 size_t noff, nlen;
2346 char *name;
2347 static const char smb3_create_tag_posix[] = {
2348 0x93, 0xAD, 0x25, 0x50, 0x9C,
2349 0xB4, 0x11, 0xE7, 0xB4, 0x23, 0x83,
2350 0xDE, 0x96, 0x8B, 0xCD, 0x7C
2351 };
2352
2353 *oplock = 0;
2354
2355 off = le32_to_cpu(rsp->CreateContextsOffset);
2356 rem = le32_to_cpu(rsp->CreateContextsLength);
2357 if (check_add_overflow(off, rem, &len) || len > rsp_iov->iov_len)
2358 return -EINVAL;
2359 cc = (struct create_context *)((u8 *)rsp + off);
2360
2361 /* Initialize inode number to 0 in case no valid data in qfid context */
2362 if (buf)
2363 buf->IndexNumber = 0;
2364
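	/*
	 * Walk the chain of create contexts: Next is the offset from the
	 * current context to the following one, and zero ends the chain.
	 */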
2365 while (rem >= sizeof(*cc)) {
2366 doff = le16_to_cpu(cc->DataOffset);
2367 dlen = le32_to_cpu(cc->DataLength);
2368 if (check_add_overflow(doff, dlen, &len) || len > rem)
2369 return -EINVAL;
2370
2371 noff = le16_to_cpu(cc->NameOffset);
2372 nlen = le16_to_cpu(cc->NameLength);
2373 if (noff + nlen > doff)
2374 return -EINVAL;
2375
2376 name = (char *)cc + noff;
2377 switch (nlen) {
2378 case 4:
2379 if (!strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) {
2380 *oplock = server->ops->parse_lease_buf(cc, epoch,
2381 lease_key);
2382 } else if (buf &&
2383 !strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4)) {
2384 parse_query_id_ctxt(cc, buf);
2385 }
2386 break;
2387 case 16:
2388 if (posix && !memcmp(name, smb3_create_tag_posix, 16))
2389 parse_posix_ctxt(cc, buf, posix);
2390 break;
2391 default:
2392 cifs_dbg(FYI, "%s: unhandled context (nlen=%zu dlen=%zu)\n",
2393 __func__, nlen, dlen);
2394 if (IS_ENABLED(CONFIG_CIFS_DEBUG2))
2395 cifs_dump_mem("context data: ", cc, dlen);
2396 break;
2397 }
2398
2399 off = le32_to_cpu(cc->Next);
2400 if (!off)
2401 break;
2402 if (check_sub_overflow(rem, off, &rem))
2403 return -EINVAL;
2404 cc = (struct create_context *)((u8 *)cc + off);
2405 }
2406
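	/* no lease was granted (or requested): report the plain oplock level from the response */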
2407 if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
2408 *oplock = rsp->OplockLevel;
2409
2410 return 0;
2411 }
2412
2413 static int
2414 add_lease_context(struct TCP_Server_Info *server,
2415 struct smb2_create_req *req,
2416 struct kvec *iov,
2417 unsigned int *num_iovec, u8 *lease_key, __u8 *oplock)
2418 {
2419 unsigned int num = *num_iovec;
2420
2421 iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock);
2422 if (iov[num].iov_base == NULL)
2423 return -ENOMEM;
2424 iov[num].iov_len = server->vals->create_lease_size;
2425 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
2426 *num_iovec = num + 1;
2427 return 0;
2428 }
2429
2430 static struct create_durable_v2 *
2431 create_durable_v2_buf(struct cifs_open_parms *oparms)
2432 {
2433 struct cifs_fid *pfid = oparms->fid;
2434 struct create_durable_v2 *buf;
2435
2436 buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
2437 if (!buf)
2438 return NULL;
2439
2440 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2441 (struct create_durable_v2, dcontext));
2442 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2));
2443 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2444 (struct create_durable_v2, Name));
2445 buf->ccontext.NameLength = cpu_to_le16(4);
2446
2447 /*
2448 * NB: Handle timeout defaults to 0, which allows server to choose
2449 * (most servers default to 120 seconds) and most clients default to 0.
2450 * This can be overridden at mount ("handletimeout=") if the user wants
2451 * a different persistent (or resilient) handle timeout for all opens
2452 * on a particular SMB3 mount.
2453 */
2454 buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
2455 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
2456
2457 /* for replay, we should not overwrite the existing create guid */
2458 if (!oparms->replay) {
2459 generate_random_uuid(buf->dcontext.CreateGuid);
2460 memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
2461 } else
2462 memcpy(buf->dcontext.CreateGuid, pfid->create_guid, 16);
2463
2464 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
2465 buf->Name[0] = 'D';
2466 buf->Name[1] = 'H';
2467 buf->Name[2] = '2';
2468 buf->Name[3] = 'Q';
2469 return buf;
2470 }
2471
2472 static struct create_durable_handle_reconnect_v2 *
2473 create_reconnect_durable_v2_buf(struct cifs_fid *fid)
2474 {
2475 struct create_durable_handle_reconnect_v2 *buf;
2476
2477 buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2),
2478 GFP_KERNEL);
2479 if (!buf)
2480 return NULL;
2481
2482 buf->ccontext.DataOffset =
2483 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
2484 dcontext));
2485 buf->ccontext.DataLength =
2486 cpu_to_le32(sizeof(struct durable_reconnect_context_v2));
2487 buf->ccontext.NameOffset =
2488 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
2489 Name));
2490 buf->ccontext.NameLength = cpu_to_le16(4);
2491
2492 buf->dcontext.Fid.PersistentFileId = fid->persistent_fid;
2493 buf->dcontext.Fid.VolatileFileId = fid->volatile_fid;
2494 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
2495 memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16);
2496
2497 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */
2498 buf->Name[0] = 'D';
2499 buf->Name[1] = 'H';
2500 buf->Name[2] = '2';
2501 buf->Name[3] = 'C';
2502 return buf;
2503 }
2504
2505 static int
2506 add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
2507 struct cifs_open_parms *oparms)
2508 {
2509 unsigned int num = *num_iovec;
2510
2511 iov[num].iov_base = create_durable_v2_buf(oparms);
2512 if (iov[num].iov_base == NULL)
2513 return -ENOMEM;
2514 iov[num].iov_len = sizeof(struct create_durable_v2);
2515 *num_iovec = num + 1;
2516 return 0;
2517 }
2518
2519 static int
2520 add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
2521 struct cifs_open_parms *oparms)
2522 {
2523 unsigned int num = *num_iovec;
2524
2525 /* indicate that we don't need to relock the file */
2526 oparms->reconnect = false;
2527
2528 iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
2529 if (iov[num].iov_base == NULL)
2530 return -ENOMEM;
2531 iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
2532 *num_iovec = num + 1;
2533 return 0;
2534 }
2535
2536 static int
2537 add_durable_context(struct kvec *iov, unsigned int *num_iovec,
2538 struct cifs_open_parms *oparms, bool use_persistent)
2539 {
2540 unsigned int num = *num_iovec;
2541
2542 if (use_persistent) {
2543 if (oparms->reconnect)
2544 return add_durable_reconnect_v2_context(iov, num_iovec,
2545 oparms);
2546 else
2547 return add_durable_v2_context(iov, num_iovec, oparms);
2548 }
2549
2550 if (oparms->reconnect) {
2551 iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
2552 /* indicate that we don't need to relock the file */
2553 oparms->reconnect = false;
2554 } else
2555 iov[num].iov_base = create_durable_buf();
2556 if (iov[num].iov_base == NULL)
2557 return -ENOMEM;
2558 iov[num].iov_len = sizeof(struct create_durable);
2559 *num_iovec = num + 1;
2560 return 0;
2561 }
2562
2563 /* See MS-SMB2 2.2.13.2.7 */
2564 static struct crt_twarp_ctxt *
2565 create_twarp_buf(__u64 timewarp)
2566 {
2567 struct crt_twarp_ctxt *buf;
2568
2569 buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL);
2570 if (!buf)
2571 return NULL;
2572
2573 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2574 (struct crt_twarp_ctxt, Timestamp));
2575 buf->ccontext.DataLength = cpu_to_le32(8);
2576 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2577 (struct crt_twarp_ctxt, Name));
2578 buf->ccontext.NameLength = cpu_to_le16(4);
2579 /* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */
2580 buf->Name[0] = 'T';
2581 buf->Name[1] = 'W';
2582 buf->Name[2] = 'r';
2583 buf->Name[3] = 'p';
2584 buf->Timestamp = cpu_to_le64(timewarp);
2585 return buf;
2586 }
2587
2588 /* See MS-SMB2 2.2.13.2.7 */
2589 static int
2590 add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp)
2591 {
2592 unsigned int num = *num_iovec;
2593
2594 iov[num].iov_base = create_twarp_buf(timewarp);
2595 if (iov[num].iov_base == NULL)
2596 return -ENOMEM;
2597 iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
2598 *num_iovec = num + 1;
2599 return 0;
2600 }
2601
2602 /* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */
2603 static void setup_owner_group_sids(char *buf)
2604 {
2605 struct owner_group_sids *sids = (struct owner_group_sids *)buf;
2606
2607 /* Populate the user ownership fields S-1-5-88-1 */
2608 sids->owner.Revision = 1;
2609 sids->owner.NumAuth = 3;
2610 sids->owner.Authority[5] = 5;
2611 sids->owner.SubAuthorities[0] = cpu_to_le32(88);
2612 sids->owner.SubAuthorities[1] = cpu_to_le32(1);
2613 sids->owner.SubAuthorities[2] = cpu_to_le32(current_fsuid().val);
2614
2615 /* Populate the group ownership fields S-1-5-88-2 */
2616 sids->group.Revision = 1;
2617 sids->group.NumAuth = 3;
2618 sids->group.Authority[5] = 5;
2619 sids->group.SubAuthorities[0] = cpu_to_le32(88);
2620 sids->group.SubAuthorities[1] = cpu_to_le32(2);
2621 sids->group.SubAuthorities[2] = cpu_to_le32(current_fsgid().val);
2622
2623 cifs_dbg(FYI, "owner S-1-5-88-1-%d, group S-1-5-88-2-%d\n", current_fsuid().val, current_fsgid().val);
2624 }
2625
2626 /* See MS-SMB2 2.2.13.2.2 and MS-DTYP 2.4.6 */
2627 static struct crt_sd_ctxt *
2628 create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
2629 {
2630 struct crt_sd_ctxt *buf;
2631 __u8 *ptr, *aclptr;
2632 unsigned int acelen, acl_size, ace_count;
2633 unsigned int owner_offset = 0;
2634 unsigned int group_offset = 0;
2635 struct smb3_acl acl = {};
2636
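	/*
	 * Reserve room for the context header, the security descriptor and up
	 * to four ACEs, rounded up since create contexts are 8-byte aligned.
	 */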
2637 *len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct smb_ace) * 4), 8);
2638
2639 if (set_owner) {
2640 /* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */
2641 *len += sizeof(struct owner_group_sids);
2642 }
2643
2644 buf = kzalloc(*len, GFP_KERNEL);
2645 if (buf == NULL)
2646 return buf;
2647
2648 ptr = (__u8 *)&buf[1];
2649 if (set_owner) {
2650 /* offset fields are from beginning of security descriptor not of create context */
2651 owner_offset = ptr - (__u8 *)&buf->sd;
2652 buf->sd.OffsetOwner = cpu_to_le32(owner_offset);
2653 group_offset = owner_offset + offsetof(struct owner_group_sids, group);
2654 buf->sd.OffsetGroup = cpu_to_le32(group_offset);
2655
2656 setup_owner_group_sids(ptr);
2657 ptr += sizeof(struct owner_group_sids);
2658 } else {
2659 buf->sd.OffsetOwner = 0;
2660 buf->sd.OffsetGroup = 0;
2661 }
2662
2663 buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd));
2664 buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name));
2665 buf->ccontext.NameLength = cpu_to_le16(4);
2666 /* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */
2667 buf->Name[0] = 'S';
2668 buf->Name[1] = 'e';
2669 buf->Name[2] = 'c';
2670 buf->Name[3] = 'D';
2671 buf->sd.Revision = 1; /* Must be one see MS-DTYP 2.4.6 */
2672
2673 /*
2674 * ACL is "self relative" ie ACL is stored in contiguous block of memory
2675 * and "DP" ie the DACL is present
2676 */
2677 buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP);
2678
2679 /* offset owner, group and Sbz1 and SACL are all zero */
2680 buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
2681 /* Skip the ACL for now; we will copy it into the buffer later */
2682 aclptr = ptr;
2683 ptr += sizeof(struct smb3_acl);
2684
2685 /* create one ACE to hold the mode embedded in reserved special SID */
2686 acelen = setup_special_mode_ACE((struct smb_ace *)ptr, (__u64)mode);
2687 ptr += acelen;
2688 acl_size = acelen + sizeof(struct smb3_acl);
2689 ace_count = 1;
2690
2691 if (set_owner) {
2692 /* we do not need to reallocate the buffer to add the two more ACEs; plenty of space */
2693 acelen = setup_special_user_owner_ACE((struct smb_ace *)ptr);
2694 ptr += acelen;
2695 acl_size += acelen;
2696 ace_count += 1;
2697 }
2698
2699 /* and one more ACE to allow access for authenticated users */
2700 acelen = setup_authusers_ACE((struct smb_ace *)ptr);
2701 ptr += acelen;
2702 acl_size += acelen;
2703 ace_count += 1;
2704
2705 acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
2706 acl.AclSize = cpu_to_le16(acl_size);
2707 acl.AceCount = cpu_to_le16(ace_count);
2708 /* acl.Sbz1 and Sbz2 MBZ so are not set here, but initialized above */
2709 memcpy(aclptr, &acl, sizeof(struct smb3_acl));
2710
2711 buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
2712 *len = round_up((unsigned int)(ptr - (__u8 *)buf), 8);
2713
2714 return buf;
2715 }
2716
2717 static int
2718 add_sd_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode, bool set_owner)
2719 {
2720 unsigned int num = *num_iovec;
2721 unsigned int len = 0;
2722
2723 iov[num].iov_base = create_sd_buf(mode, set_owner, &len);
2724 if (iov[num].iov_base == NULL)
2725 return -ENOMEM;
2726 iov[num].iov_len = len;
2727 *num_iovec = num + 1;
2728 return 0;
2729 }
2730
2731 static struct crt_query_id_ctxt *
2732 create_query_id_buf(void)
2733 {
2734 struct crt_query_id_ctxt *buf;
2735
2736 buf = kzalloc(sizeof(struct crt_query_id_ctxt), GFP_KERNEL);
2737 if (!buf)
2738 return NULL;
2739
2740 buf->ccontext.DataOffset = cpu_to_le16(0);
2741 buf->ccontext.DataLength = cpu_to_le32(0);
2742 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2743 (struct crt_query_id_ctxt, Name));
2744 buf->ccontext.NameLength = cpu_to_le16(4);
2745 /* SMB2_CREATE_QUERY_ON_DISK_ID is "QFid" */
2746 buf->Name[0] = 'Q';
2747 buf->Name[1] = 'F';
2748 buf->Name[2] = 'i';
2749 buf->Name[3] = 'd';
2750 return buf;
2751 }
2752
2753 /* See MS-SMB2 2.2.13.2.9 */
2754 static int
2755 add_query_id_context(struct kvec *iov, unsigned int *num_iovec)
2756 {
2757 unsigned int num = *num_iovec;
2758
2759 iov[num].iov_base = create_query_id_buf();
2760 if (iov[num].iov_base == NULL)
2761 return -ENOMEM;
2762 iov[num].iov_len = sizeof(struct crt_query_id_ctxt);
2763 *num_iovec = num + 1;
2764 return 0;
2765 }
2766
2767 static void add_ea_context(struct cifs_open_parms *oparms,
2768 struct kvec *rq_iov, unsigned int *num_iovs)
2769 {
2770 struct kvec *iov = oparms->ea_cctx;
2771
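	/*
	 * If the caller supplied an EA create context, hand it off to the
	 * request; zero the caller's kvec so the buffer is freed only once
	 * (by SMB2_open_free()).
	 */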
2772 if (iov && iov->iov_base && iov->iov_len) {
2773 rq_iov[(*num_iovs)++] = *iov;
2774 memset(iov, 0, sizeof(*iov));
2775 }
2776 }
2777
2778 static int
2779 alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
2780 const char *treename, const __le16 *path)
2781 {
2782 int treename_len, path_len;
2783 struct nls_table *cp;
2784 const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)};
2785
2786 /*
2787 * skip leading "\\"
2788 */
2789 treename_len = strlen(treename);
2790 if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\'))
2791 return -EINVAL;
2792
2793 treename += 2;
2794 treename_len -= 2;
2795
2796 path_len = UniStrnlen((wchar_t *)path, PATH_MAX);
2797
2798 /* make room for one path separator only if @path isn't empty */
2799 *out_len = treename_len + (path[0] ? 1 : 0) + path_len;
2800
2801 /*
2802 * final path needs to be 8-byte aligned as specified in
2803 * MS-SMB2 2.2.13 SMB2 CREATE Request.
2804 */
2805 *out_size = round_up(*out_len * sizeof(__le16), 8);
2806 *out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL);
2807 if (!*out_path)
2808 return -ENOMEM;
2809
2810 cp = load_nls_default();
2811 cifs_strtoUTF16(*out_path, treename, treename_len, cp);
2812
2813 /* Do not append the separator if the path is empty */
2814 if (path[0] != cpu_to_le16(0x0000)) {
2815 UniStrcat((wchar_t *)*out_path, (wchar_t *)sep);
2816 UniStrcat((wchar_t *)*out_path, (wchar_t *)path);
2817 }
2818
2819 unload_nls(cp);
2820
2821 return 0;
2822 }
2823
2824 int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
2825 umode_t mode, struct cifs_tcon *tcon,
2826 const char *full_path,
2827 struct cifs_sb_info *cifs_sb)
2828 {
2829 struct smb_rqst rqst;
2830 struct smb2_create_req *req;
2831 struct smb2_create_rsp *rsp = NULL;
2832 struct cifs_ses *ses = tcon->ses;
2833 struct kvec iov[3]; /* make sure at least one for each open context */
2834 struct kvec rsp_iov = {NULL, 0};
2835 int resp_buftype;
2836 int uni_path_len;
2837 __le16 *copy_path = NULL;
2838 int copy_size;
2839 int rc = 0;
2840 unsigned int n_iov = 2;
2841 __u32 file_attributes = 0;
2842 char *pc_buf = NULL;
2843 int flags = 0;
2844 unsigned int total_len;
2845 __le16 *utf16_path = NULL;
2846 struct TCP_Server_Info *server;
2847 int retries = 0, cur_sleep = 1;
2848
2849 replay_again:
2850 /* reinitialize for possible replay */
2851 flags = 0;
2852 n_iov = 2;
2853 server = cifs_pick_channel(ses);
2854
2855 cifs_dbg(FYI, "mkdir\n");
2856
2857 /* resource #1: path allocation */
2858 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
2859 if (!utf16_path)
2860 return -ENOMEM;
2861
2862 if (!ses || !server) {
2863 rc = -EIO;
2864 goto err_free_path;
2865 }
2866
2867 /* resource #2: request */
2868 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
2869 (void **) &req, &total_len);
2870 if (rc)
2871 goto err_free_path;
2872
2873
2874 if (smb3_encryption_required(tcon))
2875 flags |= CIFS_TRANSFORM_REQ;
2876
2877 req->ImpersonationLevel = IL_IMPERSONATION;
2878 req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
2879 /* File attributes ignored on open (used in create though) */
2880 req->FileAttributes = cpu_to_le32(file_attributes);
2881 req->ShareAccess = FILE_SHARE_ALL_LE;
2882 req->CreateDisposition = cpu_to_le32(FILE_CREATE);
2883 req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
2884
2885 iov[0].iov_base = (char *)req;
2886 /* -1 since last byte is buf[0] which is sent below (path) */
2887 iov[0].iov_len = total_len - 1;
2888
2889 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
2890
2891 /* [MS-SMB2] 2.2.13 NameOffset:
2892 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
2893 * the SMB2 header, the file name includes a prefix that will
2894 * be processed during DFS name normalization as specified in
2895 * section 3.3.5.9. Otherwise, the file name is relative to
2896 * the share that is identified by the TreeId in the SMB2
2897 * header.
2898 */
2899 if (tcon->share_flags & SHI1005_FLAGS_DFS) {
2900 int name_len;
2901
2902 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
2903 rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
2904 &name_len,
2905 tcon->tree_name, utf16_path);
2906 if (rc)
2907 goto err_free_req;
2908
2909 req->NameLength = cpu_to_le16(name_len * 2);
2910 uni_path_len = copy_size;
2911 /* free before overwriting resource */
2912 kfree(utf16_path);
2913 utf16_path = copy_path;
2914 } else {
2915 uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
2916 /* MUST set path len (NameLength) to 0 opening root of share */
2917 req->NameLength = cpu_to_le16(uni_path_len - 2);
2918 if (uni_path_len % 8 != 0) {
2919 copy_size = roundup(uni_path_len, 8);
2920 copy_path = kzalloc(copy_size, GFP_KERNEL);
2921 if (!copy_path) {
2922 rc = -ENOMEM;
2923 goto err_free_req;
2924 }
2925 memcpy((char *)copy_path, (const char *)utf16_path,
2926 uni_path_len);
2927 uni_path_len = copy_size;
2928 /* free before overwriting resource */
2929 kfree(utf16_path);
2930 utf16_path = copy_path;
2931 }
2932 }
2933
2934 iov[1].iov_len = uni_path_len;
2935 iov[1].iov_base = utf16_path;
2936 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
2937
2938 if (tcon->posix_extensions) {
2939 /* resource #3: posix buf */
2940 rc = add_posix_context(iov, &n_iov, mode);
2941 if (rc)
2942 goto err_free_req;
2943 req->CreateContextsOffset = cpu_to_le32(
2944 sizeof(struct smb2_create_req) +
2945 iov[1].iov_len);
2946 pc_buf = iov[n_iov-1].iov_base;
2947 }
2948
2949
2950 memset(&rqst, 0, sizeof(struct smb_rqst));
2951 rqst.rq_iov = iov;
2952 rqst.rq_nvec = n_iov;
2953
2954 /* no need to inc num_remote_opens because we close it just below */
2955 trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
2956 FILE_WRITE_ATTRIBUTES);
2957
2958 if (retries)
2959 smb2_set_replay(server, &rqst);
2960
2961 /* resource #4: response buffer */
2962 rc = cifs_send_recv(xid, ses, server,
2963 &rqst, &resp_buftype, flags, &rsp_iov);
2964 if (rc) {
2965 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
2966 trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
2967 CREATE_NOT_FILE,
2968 FILE_WRITE_ATTRIBUTES, rc);
2969 goto err_free_rsp_buf;
2970 }
2971
2972 /*
2973 * Although it is unlikely for rsp to be NULL while rc is not set,
2974 * the check below is slightly safer long term (and quiets a Coverity
2975 * warning)
2976 */
2977 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
2978 if (rsp == NULL) {
2979 rc = -EIO;
2980 kfree(pc_buf);
2981 goto err_free_req;
2982 }
2983
2984 trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
2985 CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES);
2986
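	/*
	 * Only the directory creation was needed; close the handle right
	 * away since no open fid is returned to the caller.
	 */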
2987 SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
2988
2989 /* Eventually save off posix specific response info and timestamps */
2990
2991 err_free_rsp_buf:
2992 free_rsp_buf(resp_buftype, rsp);
2993 kfree(pc_buf);
2994 err_free_req:
2995 cifs_small_buf_release(req);
2996 err_free_path:
2997 kfree(utf16_path);
2998
2999 if (is_replayable_error(rc) &&
3000 smb2_should_replay(tcon, &retries, &cur_sleep))
3001 goto replay_again;
3002
3003 return rc;
3004 }
3005
3006 int
3007 SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3008 struct smb_rqst *rqst, __u8 *oplock,
3009 struct cifs_open_parms *oparms, __le16 *path)
3010 {
3011 struct smb2_create_req *req;
3012 unsigned int n_iov = 2;
3013 __u32 file_attributes = 0;
3014 int copy_size;
3015 int uni_path_len;
3016 unsigned int total_len;
3017 struct kvec *iov = rqst->rq_iov;
3018 __le16 *copy_path;
3019 int rc;
3020
3021 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
3022 (void **) &req, &total_len);
3023 if (rc)
3024 return rc;
3025
3026 iov[0].iov_base = (char *)req;
3027 /* -1 since last byte is buf[0] which is sent below (path) */
3028 iov[0].iov_len = total_len - 1;
3029
3030 if (oparms->create_options & CREATE_OPTION_READONLY)
3031 file_attributes |= ATTR_READONLY;
3032 if (oparms->create_options & CREATE_OPTION_SPECIAL)
3033 file_attributes |= ATTR_SYSTEM;
3034
3035 req->ImpersonationLevel = IL_IMPERSONATION;
3036 req->DesiredAccess = cpu_to_le32(oparms->desired_access);
3037 /* File attributes ignored on open (used in create though) */
3038 req->FileAttributes = cpu_to_le32(file_attributes);
3039 req->ShareAccess = FILE_SHARE_ALL_LE;
3040
3041 req->CreateDisposition = cpu_to_le32(oparms->disposition);
3042 req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
3043 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
3044
3045 /* [MS-SMB2] 2.2.13 NameOffset:
3046 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
3047 * the SMB2 header, the file name includes a prefix that will
3048 * be processed during DFS name normalization as specified in
3049 * section 3.3.5.9. Otherwise, the file name is relative to
3050 * the share that is identified by the TreeId in the SMB2
3051 * header.
3052 */
3053 if (tcon->share_flags & SHI1005_FLAGS_DFS) {
3054 int name_len;
3055
3056 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
3057 rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
3058 &name_len,
3059 tcon->tree_name, path);
3060 if (rc)
3061 return rc;
3062 req->NameLength = cpu_to_le16(name_len * 2);
3063 uni_path_len = copy_size;
3064 path = copy_path;
3065 } else {
3066 uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
3067 /* MUST set path len (NameLength) to 0 opening root of share */
3068 req->NameLength = cpu_to_le16(uni_path_len - 2);
3069 copy_size = round_up(uni_path_len, 8);
3070 copy_path = kzalloc(copy_size, GFP_KERNEL);
3071 if (!copy_path)
3072 return -ENOMEM;
3073 memcpy((char *)copy_path, (const char *)path,
3074 uni_path_len);
3075 uni_path_len = copy_size;
3076 path = copy_path;
3077 }
3078
3079 iov[1].iov_len = uni_path_len;
3080 iov[1].iov_base = path;
3081
3082 if ((!server->oplocks) || (tcon->no_lease))
3083 *oplock = SMB2_OPLOCK_LEVEL_NONE;
3084
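	/*
	 * Only request a lease if the server supports leasing (and directory
	 * leasing when creating a directory); otherwise fall back to the
	 * caller's requested oplock level.
	 */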
3085 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
3086 *oplock == SMB2_OPLOCK_LEVEL_NONE)
3087 req->RequestedOplockLevel = *oplock;
3088 else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
3089 (oparms->create_options & CREATE_NOT_FILE))
3090 req->RequestedOplockLevel = *oplock; /* no srv lease support */
3091 else {
3092 rc = add_lease_context(server, req, iov, &n_iov,
3093 oparms->fid->lease_key, oplock);
3094 if (rc)
3095 return rc;
3096 }
3097
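	/* durable handles are only requested when the caller asked for a batch oplock */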
3098 if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
3099 rc = add_durable_context(iov, &n_iov, oparms,
3100 tcon->use_persistent);
3101 if (rc)
3102 return rc;
3103 }
3104
3105 if (tcon->posix_extensions) {
3106 rc = add_posix_context(iov, &n_iov, oparms->mode);
3107 if (rc)
3108 return rc;
3109 }
3110
3111 if (tcon->snapshot_time) {
3112 cifs_dbg(FYI, "adding snapshot context\n");
3113 rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
3114 if (rc)
3115 return rc;
3116 }
3117
3118 if ((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) {
3119 bool set_mode;
3120 bool set_owner;
3121
3122 if ((oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) &&
3123 (oparms->mode != ACL_NO_MODE))
3124 set_mode = true;
3125 else {
3126 set_mode = false;
3127 oparms->mode = ACL_NO_MODE;
3128 }
3129
3130 if (oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
3131 set_owner = true;
3132 else
3133 set_owner = false;
3134
3135 if (set_owner | set_mode) {
3136 cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode);
3137 rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner);
3138 if (rc)
3139 return rc;
3140 }
3141 }
3142
3143 add_query_id_context(iov, &n_iov);
3144 add_ea_context(oparms, iov, &n_iov);
3145
3146 if (n_iov > 2) {
3147 /*
3148 * We have create contexts following iov[1] (the file
3149 * name); point at them from the main create request
3150 */
3151 req->CreateContextsOffset = cpu_to_le32(
3152 sizeof(struct smb2_create_req) +
3153 iov[1].iov_len);
3154 req->CreateContextsLength = 0;
3155
3156 for (unsigned int i = 2; i < (n_iov-1); i++) {
3157 struct kvec *v = &iov[i];
3158 size_t len = v->iov_len;
3159 struct create_context *cctx =
3160 (struct create_context *)v->iov_base;
3161
3162 cctx->Next = cpu_to_le32(len);
3163 le32_add_cpu(&req->CreateContextsLength, len);
3164 }
3165 le32_add_cpu(&req->CreateContextsLength,
3166 iov[n_iov-1].iov_len);
3167 }
3168
3169 rqst->rq_nvec = n_iov;
3170 return 0;
3171 }
3172
3173 /* rq_iov[0] is the request and is released by cifs_small_buf_release().
3174 * All other vectors are freed by kfree().
3175 */
3176 void
3177 SMB2_open_free(struct smb_rqst *rqst)
3178 {
3179 int i;
3180
3181 if (rqst && rqst->rq_iov) {
3182 cifs_small_buf_release(rqst->rq_iov[0].iov_base);
3183 for (i = 1; i < rqst->rq_nvec; i++)
3184 if (rqst->rq_iov[i].iov_base != smb2_padding)
3185 kfree(rqst->rq_iov[i].iov_base);
3186 }
3187 }
3188
3189 int
3190 SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
3191 __u8 *oplock, struct smb2_file_all_info *buf,
3192 struct create_posix_rsp *posix,
3193 struct kvec *err_iov, int *buftype)
3194 {
3195 struct smb_rqst rqst;
3196 struct smb2_create_rsp *rsp = NULL;
3197 struct cifs_tcon *tcon = oparms->tcon;
3198 struct cifs_ses *ses = tcon->ses;
3199 struct TCP_Server_Info *server;
3200 struct kvec iov[SMB2_CREATE_IOV_SIZE];
3201 struct kvec rsp_iov = {NULL, 0};
3202 int resp_buftype = CIFS_NO_BUFFER;
3203 int rc = 0;
3204 int flags = 0;
3205 int retries = 0, cur_sleep = 1;
3206
3207 replay_again:
3208 /* reinitialize for possible replay */
3209 flags = 0;
3210 server = cifs_pick_channel(ses);
3211 oparms->replay = !!(retries);
3212
3213 cifs_dbg(FYI, "create/open\n");
3214 if (!ses || !server)
3215 return -EIO;
3216
3217 if (smb3_encryption_required(tcon))
3218 flags |= CIFS_TRANSFORM_REQ;
3219
3220 memset(&rqst, 0, sizeof(struct smb_rqst));
3221 memset(&iov, 0, sizeof(iov));
3222 rqst.rq_iov = iov;
3223 rqst.rq_nvec = SMB2_CREATE_IOV_SIZE;
3224
3225 rc = SMB2_open_init(tcon, server,
3226 &rqst, oplock, oparms, path);
3227 if (rc)
3228 goto creat_exit;
3229
3230 trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path,
3231 oparms->create_options, oparms->desired_access);
3232
3233 if (retries)
3234 smb2_set_replay(server, &rqst);
3235
3236 rc = cifs_send_recv(xid, ses, server,
3237 &rqst, &resp_buftype, flags,
3238 &rsp_iov);
3239 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
3240
3241 if (rc != 0) {
3242 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
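	/*
	 * On error, hand the response buffer to the caller (e.g. so the error
	 * response can be examined, such as for symlink handling); clearing
	 * resp_buftype keeps it from being freed below.
	 */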
3243 if (err_iov && rsp) {
3244 *err_iov = rsp_iov;
3245 *buftype = resp_buftype;
3246 resp_buftype = CIFS_NO_BUFFER;
3247 rsp = NULL;
3248 }
3249 trace_smb3_open_err(xid, tcon->tid, ses->Suid,
3250 oparms->create_options, oparms->desired_access, rc);
3251 if (rc == -EREMCHG) {
3252 pr_warn_once("server share %s deleted\n",
3253 tcon->tree_name);
3254 tcon->need_reconnect = true;
3255 }
3256 goto creat_exit;
3257 } else if (rsp == NULL) /* unlikely to happen, but safer to check */
3258 goto creat_exit;
3259 else
3260 trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
3261 oparms->create_options, oparms->desired_access);
3262
3263 atomic_inc(&tcon->num_remote_opens);
3264 oparms->fid->persistent_fid = rsp->PersistentFileId;
3265 oparms->fid->volatile_fid = rsp->VolatileFileId;
3266 oparms->fid->access = oparms->desired_access;
3267 #ifdef CONFIG_CIFS_DEBUG2
3268 oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId);
3269 #endif /* CIFS_DEBUG2 */
3270
3271 if (buf) {
3272 buf->CreationTime = rsp->CreationTime;
3273 buf->LastAccessTime = rsp->LastAccessTime;
3274 buf->LastWriteTime = rsp->LastWriteTime;
3275 buf->ChangeTime = rsp->ChangeTime;
3276 buf->AllocationSize = rsp->AllocationSize;
3277 buf->EndOfFile = rsp->EndofFile;
3278 buf->Attributes = rsp->FileAttributes;
3279 buf->NumberOfLinks = cpu_to_le32(1);
3280 buf->DeletePending = 0;
3281 }
3282
3283
3284 rc = smb2_parse_contexts(server, &rsp_iov, &oparms->fid->epoch,
3285 oparms->fid->lease_key, oplock, buf, posix);
3286 creat_exit:
3287 SMB2_open_free(&rqst);
3288 free_rsp_buf(resp_buftype, rsp);
3289
3290 if (is_replayable_error(rc) &&
3291 smb2_should_replay(tcon, &retries, &cur_sleep))
3292 goto replay_again;
3293
3294 return rc;
3295 }
3296
3297 int
3298 SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3299 struct smb_rqst *rqst,
3300 u64 persistent_fid, u64 volatile_fid, u32 opcode,
3301 char *in_data, u32 indatalen,
3302 __u32 max_response_size)
3303 {
3304 struct smb2_ioctl_req *req;
3305 struct kvec *iov = rqst->rq_iov;
3306 unsigned int total_len;
3307 int rc;
3308 char *in_data_buf;
3309
3310 rc = smb2_ioctl_req_init(opcode, tcon, server,
3311 (void **) &req, &total_len);
3312 if (rc)
3313 return rc;
3314
3315 if (indatalen) {
3316 unsigned int len;
3317
3318 if (WARN_ON_ONCE(smb3_encryption_required(tcon) &&
3319 (check_add_overflow(total_len - 1,
3320 ALIGN(indatalen, 8), &len) ||
3321 len > MAX_CIFS_SMALL_BUFFER_SIZE))) {
3322 cifs_small_buf_release(req);
3323 return -EIO;
3324 }
3325 /*
3326 * indatalen is usually small at a couple of bytes max, so
3327 * just allocate through generic pool
3328 */
3329 in_data_buf = kmemdup(in_data, indatalen, GFP_NOFS);
3330 if (!in_data_buf) {
3331 cifs_small_buf_release(req);
3332 return -ENOMEM;
3333 }
3334 }
3335
3336 req->CtlCode = cpu_to_le32(opcode);
3337 req->PersistentFileId = persistent_fid;
3338 req->VolatileFileId = volatile_fid;
3339
3340 iov[0].iov_base = (char *)req;
3341 /*
3342 * If there is no input data, the size of the ioctl struct in the
3343 * protocol spec still includes a one byte data buffer. However,
3344 * if input data is passed to the ioctl, we do not want to double
3345 * count it, so we do not send the dummy one byte of data in
3346 * iovec[0] when we are also sending input data
3347 * (in iovec[1]).
3348 */
3349 if (indatalen) {
3350 req->InputCount = cpu_to_le32(indatalen);
3351 /* do not set InputOffset if no input data */
3352 req->InputOffset =
3353 cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
3354 rqst->rq_nvec = 2;
3355 iov[0].iov_len = total_len - 1;
3356 iov[1].iov_base = in_data_buf;
3357 iov[1].iov_len = indatalen;
3358 } else {
3359 rqst->rq_nvec = 1;
3360 iov[0].iov_len = total_len;
3361 }
3362
3363 req->OutputOffset = 0;
3364 req->OutputCount = 0; /* MBZ */
3365
3366 /*
3367 * In most cases max_response_size is set to 16K (CIFSMaxBufSize).
3368 * We could increase the default MaxOutputResponse, but that could require
3369 * more credits. Windows typically sets this smaller, but for some
3370 * ioctls it may be useful to allow the server to send more. There is no
3371 * point limiting what the server can send as long as it fits in one credit.
3372 * We cannot handle more than CIFS_MAX_BUF_SIZE yet, but may want
3373 * to raise this limit in the future.
3374 * Note that for snapshot queries, servers such as Azure expect the
3375 * first query to be minimal size (and just used to get the number/size
3376 * of previous versions), so the response size must be specified as EXACTLY
3377 * sizeof(struct snapshot_array), which is 16 when rounded up to a multiple
3378 * of eight bytes. Currently that is the only case where we set max
3379 * response size smaller.
3380 */
3381 req->MaxOutputResponse = cpu_to_le32(max_response_size);
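/*
 * Credit charge is based on the larger of the request and response payloads,
 * in units of 64K (SMB2_MAX_BUFFER_SIZE). For example, the default 16K
 * max_response_size costs one credit, while a 128K response would cost two.
 */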
3382 req->hdr.CreditCharge =
3383 cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
3384 SMB2_MAX_BUFFER_SIZE));
3385 /* always an FSCTL (for now) */
3386 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
3387
3388 /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
3389 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
3390 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
3391
3392 return 0;
3393 }
3394
3395 void
3396 SMB2_ioctl_free(struct smb_rqst *rqst)
3397 {
3398 int i;
3399
3400 if (rqst && rqst->rq_iov) {
3401 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
3402 for (i = 1; i < rqst->rq_nvec; i++)
3403 if (rqst->rq_iov[i].iov_base != smb2_padding)
3404 kfree(rqst->rq_iov[i].iov_base);
3405 }
3406 }
3407
3408
3409 /*
3410 * SMB2 IOCTL is used for both IOCTLs and FSCTLs
3411 */
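/*
 * Typical usage (illustrative sketch only, error handling omitted): pass the
 * FSCTL code and any input payload, e.g.
 *
 *	rc = SMB2_ioctl(xid, tcon, fid->persistent_fid, fid->volatile_fid,
 *			FSCTL_GET_REPARSE_POINT, NULL, 0, CIFSMaxBufSize,
 *			&out_data, &out_len);
 *
 * On success *out_data is allocated (kmemdup) from the response and must be
 * freed by the caller with kfree().
 */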
3412 int
3413 SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
3414 u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen,
3415 u32 max_out_data_len, char **out_data,
3416 u32 *plen /* returned data len */)
3417 {
3418 struct smb_rqst rqst;
3419 struct smb2_ioctl_rsp *rsp = NULL;
3420 struct cifs_ses *ses;
3421 struct TCP_Server_Info *server;
3422 struct kvec iov[SMB2_IOCTL_IOV_SIZE];
3423 struct kvec rsp_iov = {NULL, 0};
3424 int resp_buftype = CIFS_NO_BUFFER;
3425 int rc = 0;
3426 int flags = 0;
3427 int retries = 0, cur_sleep = 1;
3428
3429 if (!tcon)
3430 return -EIO;
3431
3432 ses = tcon->ses;
3433 if (!ses)
3434 return -EIO;
3435
3436 replay_again:
3437 /* reinitialize for possible replay */
3438 flags = 0;
3439 server = cifs_pick_channel(ses);
3440
3441 if (!server)
3442 return -EIO;
3443
3444 cifs_dbg(FYI, "SMB2 IOCTL\n");
3445
3446 if (out_data != NULL)
3447 *out_data = NULL;
3448
3449 /* zero out returned data len, in case of error */
3450 if (plen)
3451 *plen = 0;
3452
3453 if (smb3_encryption_required(tcon))
3454 flags |= CIFS_TRANSFORM_REQ;
3455
3456 memset(&rqst, 0, sizeof(struct smb_rqst));
3457 memset(&iov, 0, sizeof(iov));
3458 rqst.rq_iov = iov;
3459 rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
3460
3461 rc = SMB2_ioctl_init(tcon, server,
3462 &rqst, persistent_fid, volatile_fid, opcode,
3463 in_data, indatalen, max_out_data_len);
3464 if (rc)
3465 goto ioctl_exit;
3466
3467 if (retries)
3468 smb2_set_replay(server, &rqst);
3469
3470 rc = cifs_send_recv(xid, ses, server,
3471 &rqst, &resp_buftype, flags,
3472 &rsp_iov);
3473 rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
3474
3475 if (rc != 0)
3476 trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid,
3477 ses->Suid, 0, opcode, rc);
3478
3479 if ((rc != 0) && (rc != -EINVAL) && (rc != -E2BIG)) {
3480 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3481 goto ioctl_exit;
3482 } else if (rc == -EINVAL) {
3483 if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
3484 (opcode != FSCTL_SRV_COPYCHUNK)) {
3485 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3486 goto ioctl_exit;
3487 }
3488 } else if (rc == -E2BIG) {
3489 if (opcode != FSCTL_QUERY_ALLOCATED_RANGES) {
3490 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3491 goto ioctl_exit;
3492 }
3493 }
3494
3495 /* check if caller wants to look at return data or just return rc */
3496 if ((plen == NULL) || (out_data == NULL))
3497 goto ioctl_exit;
3498
3499 /*
3500 * Although it is unlikely for rsp to be NULL while rc is not set,
3501 * adding the check below is slightly safer long term (and quiets a
3502 * Coverity warning)
3503 */
3504 if (rsp == NULL) {
3505 rc = -EIO;
3506 goto ioctl_exit;
3507 }
3508
3509 *plen = le32_to_cpu(rsp->OutputCount);
3510
3511 /* We check for obvious errors in the output buffer length and offset */
3512 if (*plen == 0)
3513 goto ioctl_exit; /* server returned no data */
3514 else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
3515 cifs_tcon_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
3516 *plen = 0;
3517 rc = -EIO;
3518 goto ioctl_exit;
3519 }
3520
3521 if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
3522 cifs_tcon_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
3523 le32_to_cpu(rsp->OutputOffset));
3524 *plen = 0;
3525 rc = -EIO;
3526 goto ioctl_exit;
3527 }
3528
3529 *out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset),
3530 *plen, GFP_KERNEL);
3531 if (*out_data == NULL) {
3532 rc = -ENOMEM;
3533 goto ioctl_exit;
3534 }
3535
3536 ioctl_exit:
3537 SMB2_ioctl_free(&rqst);
3538 free_rsp_buf(resp_buftype, rsp);
3539
3540 if (is_replayable_error(rc) &&
3541 smb2_should_replay(tcon, &retries, &cur_sleep))
3542 goto replay_again;
3543
3544 return rc;
3545 }
3546
3547 /*
3548 * Individual callers to ioctl worker function follow
3549 */
3550
3551 int
3552 SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
3553 u64 persistent_fid, u64 volatile_fid)
3554 {
3555 int rc;
3556 struct compress_ioctl fsctl_input;
3557 char *ret_data = NULL;
3558
3559 fsctl_input.CompressionState =
3560 cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
3561
3562 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
3563 FSCTL_SET_COMPRESSION,
3564 (char *)&fsctl_input /* data input */,
3565 2 /* in data len */, CIFSMaxBufSize /* max out data */,
3566 &ret_data /* out data */, NULL);
3567
3568 cifs_dbg(FYI, "set compression rc %d\n", rc);
3569
3570 return rc;
3571 }
3572
3573 int
3574 SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3575 struct smb_rqst *rqst,
3576 u64 persistent_fid, u64 volatile_fid, bool query_attrs)
3577 {
3578 struct smb2_close_req *req;
3579 struct kvec *iov = rqst->rq_iov;
3580 unsigned int total_len;
3581 int rc;
3582
3583 rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server,
3584 (void **) &req, &total_len);
3585 if (rc)
3586 return rc;
3587
3588 req->PersistentFileId = persistent_fid;
3589 req->VolatileFileId = volatile_fid;
3590 if (query_attrs)
3591 req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
3592 else
3593 req->Flags = 0;
3594 iov[0].iov_base = (char *)req;
3595 iov[0].iov_len = total_len;
3596
3597 return 0;
3598 }
3599
3600 void
3601 SMB2_close_free(struct smb_rqst *rqst)
3602 {
3603 if (rqst && rqst->rq_iov)
3604 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
3605 }
3606
3607 int
3608 __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
3609 u64 persistent_fid, u64 volatile_fid,
3610 struct smb2_file_network_open_info *pbuf)
3611 {
3612 struct smb_rqst rqst;
3613 struct smb2_close_rsp *rsp = NULL;
3614 struct cifs_ses *ses = tcon->ses;
3615 struct TCP_Server_Info *server;
3616 struct kvec iov[1];
3617 struct kvec rsp_iov;
3618 int resp_buftype = CIFS_NO_BUFFER;
3619 int rc = 0;
3620 int flags = 0;
3621 bool query_attrs = false;
3622 int retries = 0, cur_sleep = 1;
3623
3624 replay_again:
3625 /* reinitialize for possible replay */
3626 flags = 0;
3627 query_attrs = false;
3628 server = cifs_pick_channel(ses);
3629
3630 cifs_dbg(FYI, "Close\n");
3631
3632 if (!ses || !server)
3633 return -EIO;
3634
3635 if (smb3_encryption_required(tcon))
3636 flags |= CIFS_TRANSFORM_REQ;
3637
3638 memset(&rqst, 0, sizeof(struct smb_rqst));
3639 memset(&iov, 0, sizeof(iov));
3640 rqst.rq_iov = iov;
3641 rqst.rq_nvec = 1;
3642
3643 /* check if we need to ask the server to return timestamps in the close response */
3644 if (pbuf)
3645 query_attrs = true;
3646
3647 trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid);
3648 rc = SMB2_close_init(tcon, server,
3649 &rqst, persistent_fid, volatile_fid,
3650 query_attrs);
3651 if (rc)
3652 goto close_exit;
3653
3654 if (retries)
3655 smb2_set_replay(server, &rqst);
3656
3657 rc = cifs_send_recv(xid, ses, server,
3658 &rqst, &resp_buftype, flags, &rsp_iov);
3659 rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
3660
3661 if (rc != 0) {
3662 cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
3663 trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid,
3664 rc);
3665 goto close_exit;
3666 } else {
3667 trace_smb3_close_done(xid, persistent_fid, tcon->tid,
3668 ses->Suid);
3669 if (pbuf)
3670 memcpy(&pbuf->network_open_info,
3671 &rsp->network_open_info,
3672 sizeof(pbuf->network_open_info));
3673 atomic_dec(&tcon->num_remote_opens);
3674 }
3675
3676 close_exit:
3677 SMB2_close_free(&rqst);
3678 free_rsp_buf(resp_buftype, rsp);
3679
3680 /* retry close in a worker thread if this one is interrupted */
3681 if (is_interrupt_error(rc)) {
3682 int tmp_rc;
3683
3684 tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
3685 volatile_fid);
3686 if (tmp_rc)
3687 cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
3688 persistent_fid, tmp_rc);
3689 }
3690
3691 if (is_replayable_error(rc) &&
3692 smb2_should_replay(tcon, &retries, &cur_sleep))
3693 goto replay_again;
3694
3695 return rc;
3696 }
3697
3698 int
3699 SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
3700 u64 persistent_fid, u64 volatile_fid)
3701 {
3702 return __SMB2_close(xid, tcon, persistent_fid, volatile_fid, NULL);
3703 }
3704
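/*
 * Sanity check a response sub-buffer described by @offset/@buffer_length:
 * it must be at least @min_buf_size bytes, no larger than the RFC1001
 * maximum, and must lie entirely within the received iov.
 */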
3705 int
3706 smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
3707 struct kvec *iov, unsigned int min_buf_size)
3708 {
3709 unsigned int smb_len = iov->iov_len;
3710 char *end_of_smb = smb_len + (char *)iov->iov_base;
3711 char *begin_of_buf = offset + (char *)iov->iov_base;
3712 char *end_of_buf = begin_of_buf + buffer_length;
3713
3714
3715 if (buffer_length < min_buf_size) {
3716 cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
3717 buffer_length, min_buf_size);
3718 return -EINVAL;
3719 }
3720
3721 /* check if beyond RFC1001 maximum length */
3722 if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
3723 cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
3724 buffer_length, smb_len);
3725 return -EINVAL;
3726 }
3727
3728 if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
3729 cifs_dbg(VFS, "Invalid server response, bad offset to data\n");
3730 return -EINVAL;
3731 }
3732
3733 return 0;
3734 }
3735
3736 /*
3737 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
3738 * Caller must free buffer.
3739 */
3740 int
3741 smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
3742 struct kvec *iov, unsigned int minbufsize,
3743 char *data)
3744 {
3745 char *begin_of_buf = offset + (char *)iov->iov_base;
3746 int rc;
3747
3748 if (!data)
3749 return -EINVAL;
3750
3751 rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize);
3752 if (rc)
3753 return rc;
3754
3755 memcpy(data, begin_of_buf, minbufsize);
3756
3757 return 0;
3758 }
3759
3760 int
3761 SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3762 struct smb_rqst *rqst,
3763 u64 persistent_fid, u64 volatile_fid,
3764 u8 info_class, u8 info_type, u32 additional_info,
3765 size_t output_len, size_t input_len, void *input)
3766 {
3767 struct smb2_query_info_req *req;
3768 struct kvec *iov = rqst->rq_iov;
3769 unsigned int total_len;
3770 size_t len;
3771 int rc;
3772
3773 if (unlikely(check_add_overflow(input_len, sizeof(*req), &len) ||
3774 len > CIFSMaxBufSize))
3775 return -EINVAL;
3776
3777 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
3778 (void **) &req, &total_len);
3779 if (rc)
3780 return rc;
3781
3782 req->InfoType = info_type;
3783 req->FileInfoClass = info_class;
3784 req->PersistentFileId = persistent_fid;
3785 req->VolatileFileId = volatile_fid;
3786 req->AdditionalInformation = cpu_to_le32(additional_info);
3787
3788 req->OutputBufferLength = cpu_to_le32(output_len);
3789 if (input_len) {
3790 req->InputBufferLength = cpu_to_le32(input_len);
3791 /* total_len for smb query request never close to le16 max */
3792 req->InputBufferOffset = cpu_to_le16(total_len - 1);
3793 memcpy(req->Buffer, input, input_len);
3794 }
3795
3796 iov[0].iov_base = (char *)req;
3797 /* 1 for Buffer */
3798 iov[0].iov_len = len;
3799 return 0;
3800 }
3801
3802 void
3803 SMB2_query_info_free(struct smb_rqst *rqst)
3804 {
3805 if (rqst && rqst->rq_iov)
3806 cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
3807 }
3808
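/*
 * Common worker for SMB2_QUERY_INFO requests (used by SMB2_query_info,
 * SMB2_query_acl, SMB2_get_srv_num, ...). If @dlen is non-NULL, the returned
 * length is stored there and, when *data is NULL, a buffer of that size is
 * allocated here (caller must free it). Otherwise @min_len bytes are copied
 * into the caller-supplied buffer at *data.
 */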
3809 static int
3810 query_info(const unsigned int xid, struct cifs_tcon *tcon,
3811 u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
3812 u32 additional_info, size_t output_len, size_t min_len, void **data,
3813 u32 *dlen)
3814 {
3815 struct smb_rqst rqst;
3816 struct smb2_query_info_rsp *rsp = NULL;
3817 struct kvec iov[1];
3818 struct kvec rsp_iov;
3819 int rc = 0;
3820 int resp_buftype = CIFS_NO_BUFFER;
3821 struct cifs_ses *ses = tcon->ses;
3822 struct TCP_Server_Info *server;
3823 int flags = 0;
3824 bool allocated = false;
3825 int retries = 0, cur_sleep = 1;
3826
3827 cifs_dbg(FYI, "Query Info\n");
3828
3829 if (!ses)
3830 return -EIO;
3831
3832 replay_again:
3833 /* reinitialize for possible replay */
3834 flags = 0;
3835 allocated = false;
3836 server = cifs_pick_channel(ses);
3837
3838 if (!server)
3839 return -EIO;
3840
3841 if (smb3_encryption_required(tcon))
3842 flags |= CIFS_TRANSFORM_REQ;
3843
3844 memset(&rqst, 0, sizeof(struct smb_rqst));
3845 memset(&iov, 0, sizeof(iov));
3846 rqst.rq_iov = iov;
3847 rqst.rq_nvec = 1;
3848
3849 rc = SMB2_query_info_init(tcon, server,
3850 &rqst, persistent_fid, volatile_fid,
3851 info_class, info_type, additional_info,
3852 output_len, 0, NULL);
3853 if (rc)
3854 goto qinf_exit;
3855
3856 trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
3857 ses->Suid, info_class, (__u32)info_type);
3858
3859 if (retries)
3860 smb2_set_replay(server, &rqst);
3861
3862 rc = cifs_send_recv(xid, ses, server,
3863 &rqst, &resp_buftype, flags, &rsp_iov);
3864 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
3865
3866 if (rc) {
3867 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
3868 trace_smb3_query_info_err(xid, persistent_fid, tcon->tid,
3869 ses->Suid, info_class, (__u32)info_type, rc);
3870 goto qinf_exit;
3871 }
3872
3873 trace_smb3_query_info_done(xid, persistent_fid, tcon->tid,
3874 ses->Suid, info_class, (__u32)info_type);
3875
3876 if (dlen) {
3877 *dlen = le32_to_cpu(rsp->OutputBufferLength);
3878 if (!*data) {
3879 *data = kmalloc(*dlen, GFP_KERNEL);
3880 if (!*data) {
3881 cifs_tcon_dbg(VFS,
3882 "Error %d allocating memory for acl\n",
3883 rc);
3884 *dlen = 0;
3885 rc = -ENOMEM;
3886 goto qinf_exit;
3887 }
3888 allocated = true;
3889 }
3890 }
3891
3892 rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
3893 le32_to_cpu(rsp->OutputBufferLength),
3894 &rsp_iov, dlen ? *dlen : min_len, *data);
3895 if (rc && allocated) {
3896 kfree(*data);
3897 *data = NULL;
3898 *dlen = 0;
3899 }
3900
3901 qinf_exit:
3902 SMB2_query_info_free(&rqst);
3903 free_rsp_buf(resp_buftype, rsp);
3904
3905 if (is_replayable_error(rc) &&
3906 smb2_should_replay(tcon, &retries, &cur_sleep))
3907 goto replay_again;
3908
3909 return rc;
3910 }
3911
3912 int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3913 u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data)
3914 {
3915 return query_info(xid, tcon, persistent_fid, volatile_fid,
3916 FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0,
3917 sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
3918 sizeof(struct smb2_file_all_info), (void **)&data,
3919 NULL);
3920 }
3921
3922 #if 0
3923 /* currently unused, as now we are doing compounding instead (see smb311_posix_query_path_info) */
3924 int
3925 SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3926 u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen)
3927 {
3928 size_t output_len = sizeof(struct smb311_posix_qinfo) +
3929 (sizeof(struct smb_sid) * 2) + (PATH_MAX * 2);
3930 *plen = 0;
3931
3932 return query_info(xid, tcon, persistent_fid, volatile_fid,
3933 SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0,
3934 output_len, sizeof(struct smb311_posix_qinfo), (void **)&data, plen);
3935 /* Note caller must free "data" (passed in above). It may be allocated in query_info call */
3936 }
3937 #endif
3938
3939 int
3940 SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
3941 u64 persistent_fid, u64 volatile_fid,
3942 void **data, u32 *plen, u32 extra_info)
3943 {
3944 __u32 additional_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
3945 extra_info;
3946 *plen = 0;
3947
3948 return query_info(xid, tcon, persistent_fid, volatile_fid,
3949 0, SMB2_O_INFO_SECURITY, additional_info,
3950 SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
3951 }
3952
3953 int
3954 SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
3955 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
3956 {
3957 return query_info(xid, tcon, persistent_fid, volatile_fid,
3958 FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0,
3959 sizeof(struct smb2_file_internal_info),
3960 sizeof(struct smb2_file_internal_info),
3961 (void **)&uniqueid, NULL);
3962 }
3963
3964 /*
3965 * CHANGE_NOTIFY Request is sent to get notifications on changes to a directory
3966 * See MS-SMB2 2.2.35 and 2.2.36
3967 */
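/*
 * completion_filter is a mask of FILE_NOTIFY_CHANGE_* bits, e.g. passing
 * FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME requests
 * notifications for file and directory creates, deletes and renames
 * within the watched directory.
 */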
3968
3969 static int
3970 SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
3971 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3972 u64 persistent_fid, u64 volatile_fid,
3973 u32 completion_filter, bool watch_tree)
3974 {
3975 struct smb2_change_notify_req *req;
3976 struct kvec *iov = rqst->rq_iov;
3977 unsigned int total_len;
3978 int rc;
3979
3980 rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server,
3981 (void **) &req, &total_len);
3982 if (rc)
3983 return rc;
3984
3985 req->PersistentFileId = persistent_fid;
3986 req->VolatileFileId = volatile_fid;
3987 /* See note 354 of MS-SMB2, 64K max */
3988 req->OutputBufferLength =
3989 cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
3990 req->CompletionFilter = cpu_to_le32(completion_filter);
3991 if (watch_tree)
3992 req->Flags = cpu_to_le16(SMB2_WATCH_TREE);
3993 else
3994 req->Flags = 0;
3995
3996 iov[0].iov_base = (char *)req;
3997 iov[0].iov_len = total_len;
3998
3999 return 0;
4000 }
4001
4002 int
4003 SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
4004 u64 persistent_fid, u64 volatile_fid, bool watch_tree,
4005 u32 completion_filter, u32 max_out_data_len, char **out_data,
4006 u32 *plen /* returned data len */)
4007 {
4008 struct cifs_ses *ses = tcon->ses;
4009 struct TCP_Server_Info *server;
4010 struct smb_rqst rqst;
4011 struct smb2_change_notify_rsp *smb_rsp;
4012 struct kvec iov[1];
4013 struct kvec rsp_iov = {NULL, 0};
4014 int resp_buftype = CIFS_NO_BUFFER;
4015 int flags = 0;
4016 int rc = 0;
4017 int retries = 0, cur_sleep = 1;
4018
4019 replay_again:
4020 /* reinitialize for possible replay */
4021 flags = 0;
4022 server = cifs_pick_channel(ses);
4023
4024 cifs_dbg(FYI, "change notify\n");
4025 if (!ses || !server)
4026 return -EIO;
4027
4028 if (smb3_encryption_required(tcon))
4029 flags |= CIFS_TRANSFORM_REQ;
4030
4031 memset(&rqst, 0, sizeof(struct smb_rqst));
4032 memset(&iov, 0, sizeof(iov));
4033 if (plen)
4034 *plen = 0;
4035
4036 rqst.rq_iov = iov;
4037 rqst.rq_nvec = 1;
4038
4039 rc = SMB2_notify_init(xid, &rqst, tcon, server,
4040 persistent_fid, volatile_fid,
4041 completion_filter, watch_tree);
4042 if (rc)
4043 goto cnotify_exit;
4044
4045 trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
4046 (u8)watch_tree, completion_filter);
4047
4048 if (retries)
4049 smb2_set_replay(server, &rqst);
4050
4051 rc = cifs_send_recv(xid, ses, server,
4052 &rqst, &resp_buftype, flags, &rsp_iov);
4053
4054 if (rc != 0) {
4055 cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE);
4056 trace_smb3_notify_err(xid, persistent_fid, tcon->tid, ses->Suid,
4057 (u8)watch_tree, completion_filter, rc);
4058 } else {
4059 trace_smb3_notify_done(xid, persistent_fid, tcon->tid,
4060 ses->Suid, (u8)watch_tree, completion_filter);
4061 /* validate that notify information is plausible */
4062 if ((rsp_iov.iov_base == NULL) ||
4063 (rsp_iov.iov_len < sizeof(struct smb2_change_notify_rsp) + 1))
4064 goto cnotify_exit;
4065
4066 smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base;
4067
4068 smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset),
4069 le32_to_cpu(smb_rsp->OutputBufferLength), &rsp_iov,
4070 sizeof(struct file_notify_information));
4071
4072 *out_data = kmemdup((char *)smb_rsp + le16_to_cpu(smb_rsp->OutputBufferOffset),
4073 le32_to_cpu(smb_rsp->OutputBufferLength), GFP_KERNEL);
4074 if (*out_data == NULL) {
4075 rc = -ENOMEM;
4076 goto cnotify_exit;
4077 } else if (plen)
4078 *plen = le32_to_cpu(smb_rsp->OutputBufferLength);
4079 }
4080
4081 cnotify_exit:
4082 if (rqst.rq_iov)
4083 cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */
4084 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4085
4086 if (is_replayable_error(rc) &&
4087 smb2_should_replay(tcon, &retries, &cur_sleep))
4088 goto replay_again;
4089
4090 return rc;
4091 }
4092
4093
4094
4095 /*
4096 * This is a no-op for now. We're not really interested in the reply, but
4097 * rather in the fact that the server sent one and that server->lstrp
4098 * gets updated.
4099 *
4100 * FIXME: maybe we should consider checking that the reply matches request?
4101 */
4102 static void
4103 smb2_echo_callback(struct mid_q_entry *mid)
4104 {
4105 struct TCP_Server_Info *server = mid->callback_data;
4106 struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
4107 struct cifs_credits credits = { .value = 0, .instance = 0 };
4108
4109 if (mid->mid_state == MID_RESPONSE_RECEIVED
4110 || mid->mid_state == MID_RESPONSE_MALFORMED) {
4111 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
4112 credits.instance = server->reconnect_instance;
4113 }
4114
4115 release_mid(mid);
4116 add_credits(server, &credits, CIFS_ECHO_OP);
4117 }
4118
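/*
 * Delayed worker (server->reconnect) that walks all sessions and tcons on
 * the primary channel, reconnects any tcon needing reconnect or file reopen
 * (reopening persistent handles on success), rebinds channels whose session
 * needs it using a temporary tcon, and reschedules itself in 2 seconds if
 * anything still needs work.
 */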
4119 void smb2_reconnect_server(struct work_struct *work)
4120 {
4121 struct TCP_Server_Info *server = container_of(work,
4122 struct TCP_Server_Info, reconnect.work);
4123 struct TCP_Server_Info *pserver;
4124 struct cifs_ses *ses, *ses2;
4125 struct cifs_tcon *tcon, *tcon2;
4126 struct list_head tmp_list, tmp_ses_list;
4127 bool ses_exist = false;
4128 bool tcon_selected = false;
4129 int rc;
4130 bool resched = false;
4131
4132 /* first check if ref count has reached 0, if not inc ref count */
4133 spin_lock(&cifs_tcp_ses_lock);
4134 if (!server->srv_count) {
4135 spin_unlock(&cifs_tcp_ses_lock);
4136 return;
4137 }
4138 server->srv_count++;
4139 spin_unlock(&cifs_tcp_ses_lock);
4140
4141 /* If server is a channel, select the primary channel */
4142 pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
4143
4144 /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
4145 mutex_lock(&pserver->reconnect_mutex);
4146
4147 /* if the server is marked for termination, drop the ref count here */
4148 if (server->terminate) {
4149 cifs_put_tcp_session(server, true);
4150 mutex_unlock(&pserver->reconnect_mutex);
4151 return;
4152 }
4153
4154 INIT_LIST_HEAD(&tmp_list);
4155 INIT_LIST_HEAD(&tmp_ses_list);
4156 cifs_dbg(FYI, "Reconnecting tcons and channels\n");
4157
4158 spin_lock(&cifs_tcp_ses_lock);
4159 list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
4160 spin_lock(&ses->ses_lock);
4161 if (ses->ses_status == SES_EXITING) {
4162 spin_unlock(&ses->ses_lock);
4163 continue;
4164 }
4165 spin_unlock(&ses->ses_lock);
4166
4167 tcon_selected = false;
4168
4169 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
4170 if (tcon->need_reconnect || tcon->need_reopen_files) {
4171 tcon->tc_count++;
4172 trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
4173 netfs_trace_tcon_ref_get_reconnect_server);
4174 list_add_tail(&tcon->rlist, &tmp_list);
4175 tcon_selected = true;
4176 }
4177 }
4178 /*
4179 * IPC has the same lifetime as its session and uses its
4180 * refcount.
4181 */
4182 if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
4183 list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
4184 tcon_selected = true;
4185 cifs_smb_ses_inc_refcount(ses);
4186 }
4187 /*
4188 * handle the case where channel needs to reconnect
4189 * binding session, but tcon is healthy (some other channel
4190 * is active)
4191 */
4192 spin_lock(&ses->chan_lock);
4193 if (!tcon_selected && cifs_chan_needs_reconnect(ses, server)) {
4194 list_add_tail(&ses->rlist, &tmp_ses_list);
4195 ses_exist = true;
4196 cifs_smb_ses_inc_refcount(ses);
4197 }
4198 spin_unlock(&ses->chan_lock);
4199 }
4200 spin_unlock(&cifs_tcp_ses_lock);
4201
4202 list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
4203 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
4204 if (!rc)
4205 cifs_reopen_persistent_handles(tcon);
4206 else
4207 resched = true;
4208 list_del_init(&tcon->rlist);
4209 if (tcon->ipc)
4210 cifs_put_smb_ses(tcon->ses);
4211 else
4212 cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_reconnect_server);
4213 }
4214
4215 if (!ses_exist)
4216 goto done;
4217
4218 /* allocate a dummy tcon struct used for reconnect */
4219 tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_reconnect_server);
4220 if (!tcon) {
4221 resched = true;
4222 list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
4223 list_del_init(&ses->rlist);
4224 cifs_put_smb_ses(ses);
4225 }
4226 goto done;
4227 }
4228
4229 tcon->status = TID_GOOD;
4230 tcon->retry = false;
4231 tcon->need_reconnect = false;
4232
4233 /* now reconnect sessions for necessary channels */
4234 list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
4235 tcon->ses = ses;
4236 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
4237 if (rc)
4238 resched = true;
4239 list_del_init(&ses->rlist);
4240 cifs_put_smb_ses(ses);
4241 }
4242 tconInfoFree(tcon, netfs_trace_tcon_ref_free_reconnect_server);
4243
4244 done:
4245 cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
4246 if (resched)
4247 queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
4248 mutex_unlock(&pserver->reconnect_mutex);
4249
4250 /* now we can safely release srv struct */
4251 cifs_put_tcp_session(server, true);
4252 }
4253
4254 int
4255 SMB2_echo(struct TCP_Server_Info *server)
4256 {
4257 struct smb2_echo_req *req;
4258 int rc = 0;
4259 struct kvec iov[1];
4260 struct smb_rqst rqst = { .rq_iov = iov,
4261 .rq_nvec = 1 };
4262 unsigned int total_len;
4263
4264 cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id);
4265
4266 spin_lock(&server->srv_lock);
4267 if (server->ops->need_neg &&
4268 server->ops->need_neg(server)) {
4269 spin_unlock(&server->srv_lock);
4270 /* No need to send echo on newly established connections */
4271 mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
4272 return rc;
4273 }
4274 spin_unlock(&server->srv_lock);
4275
4276 rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
4277 (void **)&req, &total_len);
4278 if (rc)
4279 return rc;
4280
4281 req->hdr.CreditRequest = cpu_to_le16(1);
4282
4283 iov[0].iov_len = total_len;
4284 iov[0].iov_base = (char *)req;
4285
4286 rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
4287 server, CIFS_ECHO_OP, NULL);
4288 if (rc)
4289 cifs_dbg(FYI, "Echo request failed: %d\n", rc);
4290
4291 cifs_small_buf_release(req);
4292 return rc;
4293 }
4294
4295 void
4296 SMB2_flush_free(struct smb_rqst *rqst)
4297 {
4298 if (rqst && rqst->rq_iov)
4299 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
4300 }
4301
4302 int
4303 SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
4304 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
4305 u64 persistent_fid, u64 volatile_fid)
4306 {
4307 struct smb2_flush_req *req;
4308 struct kvec *iov = rqst->rq_iov;
4309 unsigned int total_len;
4310 int rc;
4311
4312 rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server,
4313 (void **) &req, &total_len);
4314 if (rc)
4315 return rc;
4316
4317 req->PersistentFileId = persistent_fid;
4318 req->VolatileFileId = volatile_fid;
4319
4320 iov[0].iov_base = (char *)req;
4321 iov[0].iov_len = total_len;
4322
4323 return 0;
4324 }
4325
4326 int
4327 SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
4328 u64 volatile_fid)
4329 {
4330 struct cifs_ses *ses = tcon->ses;
4331 struct smb_rqst rqst;
4332 struct kvec iov[1];
4333 struct kvec rsp_iov = {NULL, 0};
4334 struct TCP_Server_Info *server;
4335 int resp_buftype = CIFS_NO_BUFFER;
4336 int flags = 0;
4337 int rc = 0;
4338 int retries = 0, cur_sleep = 1;
4339
4340 replay_again:
4341 /* reinitialize for possible replay */
4342 flags = 0;
4343 server = cifs_pick_channel(ses);
4344
4345 cifs_dbg(FYI, "flush\n");
4346 if (!ses || !(ses->server))
4347 return -EIO;
4348
4349 if (smb3_encryption_required(tcon))
4350 flags |= CIFS_TRANSFORM_REQ;
4351
4352 memset(&rqst, 0, sizeof(struct smb_rqst));
4353 memset(&iov, 0, sizeof(iov));
4354 rqst.rq_iov = iov;
4355 rqst.rq_nvec = 1;
4356
4357 rc = SMB2_flush_init(xid, &rqst, tcon, server,
4358 persistent_fid, volatile_fid);
4359 if (rc)
4360 goto flush_exit;
4361
4362 trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
4363
4364 if (retries)
4365 smb2_set_replay(server, &rqst);
4366
4367 rc = cifs_send_recv(xid, ses, server,
4368 &rqst, &resp_buftype, flags, &rsp_iov);
4369
4370 if (rc != 0) {
4371 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
4372 trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid,
4373 rc);
4374 } else
4375 trace_smb3_flush_done(xid, persistent_fid, tcon->tid,
4376 ses->Suid);
4377
4378 flush_exit:
4379 SMB2_flush_free(&rqst);
4380 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4381
4382 if (is_replayable_error(rc) &&
4383 smb2_should_replay(tcon, &retries, &cur_sleep))
4384 goto replay_again;
4385
4386 return rc;
4387 }
4388
4389 #ifdef CONFIG_CIFS_SMB_DIRECT
4390 static inline bool smb3_use_rdma_offload(struct cifs_io_parms *io_parms)
4391 {
4392 struct TCP_Server_Info *server = io_parms->server;
4393 struct cifs_tcon *tcon = io_parms->tcon;
4394
4395 /* we can only offload if we're connected */
4396 if (!server || !tcon)
4397 return false;
4398
4399 /* we can only offload on an rdma connection */
4400 if (!server->rdma || !server->smbd_conn)
4401 return false;
4402
4403 /* we don't support signed offload yet */
4404 if (server->sign)
4405 return false;
4406
4407 /* we don't support encrypted offload yet */
4408 if (smb3_encryption_required(tcon))
4409 return false;
4410
4411 /* offload also has its overhead, so only do it if desired */
4412 if (io_parms->length < server->smbd_conn->rdma_readwrite_threshold)
4413 return false;
4414
4415 return true;
4416 }
4417 #endif /* CONFIG_CIFS_SMB_DIRECT */
4418
4419 /*
4420 * To form a chain of read requests, read requests after the first should be
4421 * marked RELATED_REQUEST, and only the last one should set END_OF_CHAIN.
4422 */
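/*
 * A sketch of the request_type flags for a three-read compound (derived from
 * the handling below): the first request passes CHAINED_REQUEST, the middle
 * one CHAINED_REQUEST | RELATED_REQUEST, and the final one additionally sets
 * END_OF_CHAIN so NextCommand is left at zero. RELATED_REQUEST makes the
 * request reuse the session, tree and file ids of the previous one in the
 * chain (the all-ones placeholder values).
 */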
4423 static int
4424 smb2_new_read_req(void **buf, unsigned int *total_len,
4425 struct cifs_io_parms *io_parms, struct cifs_io_subrequest *rdata,
4426 unsigned int remaining_bytes, int request_type)
4427 {
4428 int rc = -EACCES;
4429 struct smb2_read_req *req = NULL;
4430 struct smb2_hdr *shdr;
4431 struct TCP_Server_Info *server = io_parms->server;
4432
4433 rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server,
4434 (void **) &req, total_len);
4435 if (rc)
4436 return rc;
4437
4438 if (server == NULL)
4439 return -ECONNABORTED;
4440
4441 shdr = &req->hdr;
4442 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
4443
4444 req->PersistentFileId = io_parms->persistent_fid;
4445 req->VolatileFileId = io_parms->volatile_fid;
4446 req->ReadChannelInfoOffset = 0; /* reserved */
4447 req->ReadChannelInfoLength = 0; /* reserved */
4448 req->Channel = 0; /* reserved */
4449 req->MinimumCount = 0;
4450 req->Length = cpu_to_le32(io_parms->length);
4451 req->Offset = cpu_to_le64(io_parms->offset);
4452
4453 trace_smb3_read_enter(rdata ? rdata->rreq->debug_id : 0,
4454 rdata ? rdata->subreq.debug_index : 0,
4455 rdata ? rdata->xid : 0,
4456 io_parms->persistent_fid,
4457 io_parms->tcon->tid, io_parms->tcon->ses->Suid,
4458 io_parms->offset, io_parms->length);
4459 #ifdef CONFIG_CIFS_SMB_DIRECT
4460 /*
4461 * If we want to do an RDMA write, fill in and append
4462 * smbd_buffer_descriptor_v1 to the end of the read request
4463 */
4464 if (rdata && smb3_use_rdma_offload(io_parms)) {
4465 struct smbd_buffer_descriptor_v1 *v1;
4466 bool need_invalidate = server->dialect == SMB30_PROT_ID;
4467
4468 rdata->mr = smbd_register_mr(server->smbd_conn, &rdata->subreq.io_iter,
4469 true, need_invalidate);
4470 if (!rdata->mr)
4471 return -EAGAIN;
4472
4473 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
4474 if (need_invalidate)
4475 req->Channel = SMB2_CHANNEL_RDMA_V1;
4476 req->ReadChannelInfoOffset =
4477 cpu_to_le16(offsetof(struct smb2_read_req, Buffer));
4478 req->ReadChannelInfoLength =
4479 cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
4480 v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
4481 v1->offset = cpu_to_le64(rdata->mr->mr->iova);
4482 v1->token = cpu_to_le32(rdata->mr->mr->rkey);
4483 v1->length = cpu_to_le32(rdata->mr->mr->length);
4484
4485 *total_len += sizeof(*v1) - 1;
4486 }
4487 #endif
4488 if (request_type & CHAINED_REQUEST) {
4489 if (!(request_type & END_OF_CHAIN)) {
4490 /* next 8-byte aligned request */
4491 *total_len = ALIGN(*total_len, 8);
4492 shdr->NextCommand = cpu_to_le32(*total_len);
4493 } else /* END_OF_CHAIN */
4494 shdr->NextCommand = 0;
4495 if (request_type & RELATED_REQUEST) {
4496 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
4497 /*
4498 * Related requests use info from previous read request
4499 * in chain.
4500 */
4501 shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
4502 shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF);
4503 req->PersistentFileId = (u64)-1;
4504 req->VolatileFileId = (u64)-1;
4505 }
4506 }
4507 if (remaining_bytes > io_parms->length)
4508 req->RemainingBytes = cpu_to_le32(remaining_bytes);
4509 else
4510 req->RemainingBytes = 0;
4511
4512 *buf = req;
4513 return rc;
4514 }
4515
4516 static void smb2_readv_worker(struct work_struct *work)
4517 {
4518 struct cifs_io_subrequest *rdata =
4519 container_of(work, struct cifs_io_subrequest, subreq.work);
4520
4521 netfs_read_subreq_terminated(&rdata->subreq, rdata->result, false);
4522 }
4523
4524 static void
4525 smb2_readv_callback(struct mid_q_entry *mid)
4526 {
4527 struct cifs_io_subrequest *rdata = mid->callback_data;
4528 struct netfs_inode *ictx = netfs_inode(rdata->rreq->inode);
4529 struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
4530 struct TCP_Server_Info *server = rdata->server;
4531 struct smb2_hdr *shdr =
4532 (struct smb2_hdr *)rdata->iov[0].iov_base;
4533 struct cifs_credits credits = {
4534 .value = 0,
4535 .instance = 0,
4536 .rreq_debug_id = rdata->rreq->debug_id,
4537 .rreq_debug_index = rdata->subreq.debug_index,
4538 };
4539 struct smb_rqst rqst = { .rq_iov = &rdata->iov[1], .rq_nvec = 1 };
4540 unsigned int rreq_debug_id = rdata->rreq->debug_id;
4541 unsigned int subreq_debug_index = rdata->subreq.debug_index;
4542
4543 if (rdata->got_bytes) {
4544 rqst.rq_iter = rdata->subreq.io_iter;
4545 }
4546
4547 WARN_ONCE(rdata->server != mid->server,
4548 "rdata server %p != mid server %p",
4549 rdata->server, mid->server);
4550
4551 cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu/%zu\n",
4552 __func__, mid->mid, mid->mid_state, rdata->result,
4553 rdata->got_bytes, rdata->subreq.len - rdata->subreq.transferred);
4554
4555 switch (mid->mid_state) {
4556 case MID_RESPONSE_RECEIVED:
4557 credits.value = le16_to_cpu(shdr->CreditRequest);
4558 credits.instance = server->reconnect_instance;
4559 /* result already set, check signature */
4560 if (server->sign && !mid->decrypted) {
4561 int rc;
4562
4563 iov_iter_truncate(&rqst.rq_iter, rdata->got_bytes);
4564 rc = smb2_verify_signature(&rqst, server);
4565 if (rc)
4566 cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n",
4567 rc);
4568 }
4569 /* FIXME: should this be counted toward the initiating task? */
4570 task_io_account_read(rdata->got_bytes);
4571 cifs_stats_bytes_read(tcon, rdata->got_bytes);
4572 break;
4573 case MID_REQUEST_SUBMITTED:
4574 case MID_RETRY_NEEDED:
4575 __set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags);
4576 rdata->result = -EAGAIN;
4577 if (server->sign && rdata->got_bytes)
4578 /* reset the byte count since we cannot verify the signature */
4579 rdata->got_bytes = 0;
4580 /* FIXME: should this be counted toward the initiating task? */
4581 task_io_account_read(rdata->got_bytes);
4582 cifs_stats_bytes_read(tcon, rdata->got_bytes);
4583 break;
4584 case MID_RESPONSE_MALFORMED:
4585 credits.value = le16_to_cpu(shdr->CreditRequest);
4586 credits.instance = server->reconnect_instance;
4587 fallthrough;
4588 default:
4589 rdata->result = -EIO;
4590 }
4591 #ifdef CONFIG_CIFS_SMB_DIRECT
4592 /*
4593 * If this rdata has a memory region registered, the MR can be freed.
4594 * The MR needs to be freed as soon as the I/O finishes to prevent deadlock,
4595 * because MRs are limited in number and are needed for future I/Os.
4596 */
4597 if (rdata->mr) {
4598 smbd_deregister_mr(rdata->mr);
4599 rdata->mr = NULL;
4600 }
4601 #endif
4602 if (rdata->result && rdata->result != -ENODATA) {
4603 cifs_stats_fail_inc(tcon, SMB2_READ_HE);
4604 trace_smb3_read_err(rdata->rreq->debug_id,
4605 rdata->subreq.debug_index,
4606 rdata->xid,
4607 rdata->req->cfile->fid.persistent_fid,
4608 tcon->tid, tcon->ses->Suid,
4609 rdata->subreq.start + rdata->subreq.transferred,
4610 rdata->subreq.len - rdata->subreq.transferred,
4611 rdata->result);
4612 } else
4613 trace_smb3_read_done(rdata->rreq->debug_id,
4614 rdata->subreq.debug_index,
4615 rdata->xid,
4616 rdata->req->cfile->fid.persistent_fid,
4617 tcon->tid, tcon->ses->Suid,
4618 rdata->subreq.start + rdata->subreq.transferred,
4619 rdata->got_bytes);
4620
4621 if (rdata->result == -ENODATA) {
4622 __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
4623 rdata->result = 0;
4624 } else {
4625 size_t trans = rdata->subreq.transferred + rdata->got_bytes;
4626 if (trans < rdata->subreq.len &&
4627 rdata->subreq.start + trans == ictx->remote_i_size) {
4628 __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
4629 rdata->result = 0;
4630 }
4631 }
4632 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value,
4633 server->credits, server->in_flight,
4634 0, cifs_trace_rw_credits_read_response_clear);
4635 rdata->credits.value = 0;
4636 rdata->subreq.transferred += rdata->got_bytes;
4637 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress);
4638 INIT_WORK(&rdata->subreq.work, smb2_readv_worker);
4639 queue_work(cifsiod_wq, &rdata->subreq.work);
4640 release_mid(mid);
4641 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
4642 server->credits, server->in_flight,
4643 credits.value, cifs_trace_rw_credits_read_response_add);
4644 add_credits(server, &credits, 0);
4645 }
4646
4647 /* smb2_async_readv - send an async read, and set up mid to handle result */
4648 int
4649 smb2_async_readv(struct cifs_io_subrequest *rdata)
4650 {
4651 int rc, flags = 0;
4652 char *buf;
4653 struct netfs_io_subrequest *subreq = &rdata->subreq;
4654 struct smb2_hdr *shdr;
4655 struct cifs_io_parms io_parms;
4656 struct smb_rqst rqst = { .rq_iov = rdata->iov,
4657 .rq_nvec = 1 };
4658 struct TCP_Server_Info *server;
4659 struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
4660 unsigned int total_len;
4661 int credit_request;
4662
4663 cifs_dbg(FYI, "%s: offset=%llu bytes=%zu\n",
4664 __func__, subreq->start, subreq->len);
4665
4666 if (!rdata->server)
4667 rdata->server = cifs_pick_channel(tcon->ses);
4668
4669 io_parms.tcon = tlink_tcon(rdata->req->cfile->tlink);
4670 io_parms.server = server = rdata->server;
4671 io_parms.offset = subreq->start + subreq->transferred;
4672 io_parms.length = subreq->len - subreq->transferred;
4673 io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid;
4674 io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid;
4675 io_parms.pid = rdata->req->pid;
4676
4677 rc = smb2_new_read_req(
4678 (void **) &buf, &total_len, &io_parms, rdata, 0, 0);
4679 if (rc)
4680 return rc;
4681
4682 if (smb3_encryption_required(io_parms.tcon))
4683 flags |= CIFS_TRANSFORM_REQ;
4684
4685 rdata->iov[0].iov_base = buf;
4686 rdata->iov[0].iov_len = total_len;
4687 rdata->got_bytes = 0;
4688 rdata->result = 0;
4689
4690 shdr = (struct smb2_hdr *)buf;
4691
4692 if (rdata->credits.value > 0) {
4693 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(io_parms.length,
4694 SMB2_MAX_BUFFER_SIZE));
4695 credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
4696 if (server->credits >= server->max_credits)
4697 shdr->CreditRequest = cpu_to_le16(0);
4698 else
4699 shdr->CreditRequest = cpu_to_le16(
4700 min_t(int, server->max_credits -
4701 server->credits, credit_request));
4702
4703 rc = adjust_credits(server, rdata, cifs_trace_rw_credits_call_readv_adjust);
4704 if (rc)
4705 goto async_readv_out;
4706
4707 flags |= CIFS_HAS_CREDITS;
4708 }
4709
4710 rc = cifs_call_async(server, &rqst,
4711 cifs_readv_receive, smb2_readv_callback,
4712 smb3_handle_read_data, rdata, flags,
4713 &rdata->credits);
4714 if (rc) {
4715 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
4716 trace_smb3_read_err(rdata->rreq->debug_id,
4717 subreq->debug_index,
4718 rdata->xid, io_parms.persistent_fid,
4719 io_parms.tcon->tid,
4720 io_parms.tcon->ses->Suid,
4721 io_parms.offset,
4722 subreq->len - subreq->transferred, rc);
4723 }
4724
4725 async_readv_out:
4726 cifs_small_buf_release(buf);
4727 return rc;
4728 }
4729
4730 int
4731 SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
4732 unsigned int *nbytes, char **buf, int *buf_type)
4733 {
4734 struct smb_rqst rqst;
4735 int resp_buftype, rc;
4736 struct smb2_read_req *req = NULL;
4737 struct smb2_read_rsp *rsp = NULL;
4738 struct kvec iov[1];
4739 struct kvec rsp_iov;
4740 unsigned int total_len;
4741 int flags = CIFS_LOG_ERROR;
4742 struct cifs_ses *ses = io_parms->tcon->ses;
4743
4744 if (!io_parms->server)
4745 io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
4746
4747 *nbytes = 0;
4748 rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
4749 if (rc)
4750 return rc;
4751
4752 if (smb3_encryption_required(io_parms->tcon))
4753 flags |= CIFS_TRANSFORM_REQ;
4754
4755 iov[0].iov_base = (char *)req;
4756 iov[0].iov_len = total_len;
4757
4758 memset(&rqst, 0, sizeof(struct smb_rqst));
4759 rqst.rq_iov = iov;
4760 rqst.rq_nvec = 1;
4761
4762 rc = cifs_send_recv(xid, ses, io_parms->server,
4763 &rqst, &resp_buftype, flags, &rsp_iov);
4764 rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
4765
4766 if (rc) {
4767 if (rc != -ENODATA) {
4768 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
4769 cifs_dbg(VFS, "Send error in read = %d\n", rc);
4770 trace_smb3_read_err(0, 0, xid,
4771 req->PersistentFileId,
4772 io_parms->tcon->tid, ses->Suid,
4773 io_parms->offset, io_parms->length,
4774 rc);
4775 } else
4776 trace_smb3_read_done(0, 0, xid,
4777 req->PersistentFileId, io_parms->tcon->tid,
4778 ses->Suid, io_parms->offset, 0);
4779 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4780 cifs_small_buf_release(req);
4781 return rc == -ENODATA ? 0 : rc;
4782 } else
4783 trace_smb3_read_done(0, 0, xid,
4784 req->PersistentFileId,
4785 io_parms->tcon->tid, ses->Suid,
4786 io_parms->offset, io_parms->length);
4787
4788 cifs_small_buf_release(req);
4789
4790 *nbytes = le32_to_cpu(rsp->DataLength);
4791 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
4792 (*nbytes > io_parms->length)) {
4793 cifs_dbg(FYI, "bad length %d for count %d\n",
4794 *nbytes, io_parms->length);
4795 rc = -EIO;
4796 *nbytes = 0;
4797 }
4798
4799 if (*buf) {
4800 memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes);
4801 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4802 } else if (resp_buftype != CIFS_NO_BUFFER) {
4803 *buf = rsp_iov.iov_base;
4804 if (resp_buftype == CIFS_SMALL_BUFFER)
4805 *buf_type = CIFS_SMALL_BUFFER;
4806 else if (resp_buftype == CIFS_LARGE_BUFFER)
4807 *buf_type = CIFS_LARGE_BUFFER;
4808 }
4809 return rc;
4810 }
4811
4812 /*
4813 * Check the mid_state and signature on received buffer (if any), and queue the
4814 * workqueue completion task.
4815 */
4816 static void
4817 smb2_writev_callback(struct mid_q_entry *mid)
4818 {
4819 struct cifs_io_subrequest *wdata = mid->callback_data;
4820 struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink);
4821 struct TCP_Server_Info *server = wdata->server;
4822 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
4823 struct cifs_credits credits = {
4824 .value = 0,
4825 .instance = 0,
4826 .rreq_debug_id = wdata->rreq->debug_id,
4827 .rreq_debug_index = wdata->subreq.debug_index,
4828 };
4829 unsigned int rreq_debug_id = wdata->rreq->debug_id;
4830 unsigned int subreq_debug_index = wdata->subreq.debug_index;
4831 ssize_t result = 0;
4832 size_t written;
4833
4834 WARN_ONCE(wdata->server != mid->server,
4835 "wdata server %p != mid server %p",
4836 wdata->server, mid->server);
4837
4838 switch (mid->mid_state) {
4839 case MID_RESPONSE_RECEIVED:
4840 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
4841 credits.instance = server->reconnect_instance;
4842 result = smb2_check_receive(mid, server, 0);
4843 if (result != 0)
4844 break;
4845
4846 written = le32_to_cpu(rsp->DataLength);
4847 /*
4848 * Mask off high 16 bits when bytes written as returned
4849 * by the server is greater than bytes requested by the
4850 * client. OS/2 servers are known to set incorrect
4851 * CountHigh values.
4852 */
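/*
 * Worked example: if the client asked to write 10 bytes and a buggy server
 * reports DataLength 0x0001000A, the high word is dropped and written
 * becomes 0x000A (10), matching the request.
 */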
4853 if (written > wdata->subreq.len)
4854 written &= 0xFFFF;
4855
4856 if (written < wdata->subreq.len)
4857 wdata->result = -ENOSPC;
4858 else
4859 wdata->subreq.len = written;
4860 break;
4861 case MID_REQUEST_SUBMITTED:
4862 case MID_RETRY_NEEDED:
4863 result = -EAGAIN;
4864 break;
4865 case MID_RESPONSE_MALFORMED:
4866 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
4867 credits.instance = server->reconnect_instance;
4868 fallthrough;
4869 default:
4870 result = -EIO;
4871 break;
4872 }
4873 #ifdef CONFIG_CIFS_SMB_DIRECT
4874 /*
4875 * If this wdata has a memory region registered, the MR can be freed.
4876 * The number of MRs available is limited, so it is important to recover
4877 * a used MR as soon as the I/O is finished. Holding the MR longer into
4878 * the later I/O process can result in an I/O deadlock due to a lack of
4879 * MRs available to send requests on I/O retry.
4880 */
4881 if (wdata->mr) {
4882 smbd_deregister_mr(wdata->mr);
4883 wdata->mr = NULL;
4884 }
4885 #endif
4886 if (result) {
4887 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
4888 trace_smb3_write_err(wdata->rreq->debug_id,
4889 wdata->subreq.debug_index,
4890 wdata->xid,
4891 wdata->req->cfile->fid.persistent_fid,
4892 tcon->tid, tcon->ses->Suid, wdata->subreq.start,
4893 wdata->subreq.len, wdata->result);
4894 if (wdata->result == -ENOSPC)
4895 pr_warn_once("Out of space writing to %s\n",
4896 tcon->tree_name);
4897 } else
4898 trace_smb3_write_done(wdata->rreq->debug_id,
4899 wdata->subreq.debug_index,
4900 wdata->xid,
4901 wdata->req->cfile->fid.persistent_fid,
4902 tcon->tid, tcon->ses->Suid,
4903 wdata->subreq.start, wdata->subreq.len);
4904
4905 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, wdata->credits.value,
4906 server->credits, server->in_flight,
4907 0, cifs_trace_rw_credits_write_response_clear);
4908 wdata->credits.value = 0;
4909 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_progress);
4910 cifs_write_subrequest_terminated(wdata, result ?: written, true);
4911 release_mid(mid);
4912 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
4913 server->credits, server->in_flight,
4914 credits.value, cifs_trace_rw_credits_write_response_add);
4915 add_credits(server, &credits, 0);
4916 }
4917
4918 /* smb2_async_writev - send an async write, and set up mid to handle result */
4919 void
4920 smb2_async_writev(struct cifs_io_subrequest *wdata)
4921 {
4922 int rc = -EACCES, flags = 0;
4923 struct smb2_write_req *req = NULL;
4924 struct smb2_hdr *shdr;
4925 struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink);
4926 struct TCP_Server_Info *server = wdata->server;
4927 struct kvec iov[1];
4928 struct smb_rqst rqst = { };
4929 unsigned int total_len, xid = wdata->xid;
4930 struct cifs_io_parms _io_parms;
4931 struct cifs_io_parms *io_parms = NULL;
4932 int credit_request;
4933
4934 /*
4935 * in the future we may get cifs_io_parms passed in from the caller,
4936 * but for now we construct it here...
4937 */
4938 _io_parms = (struct cifs_io_parms) {
4939 .tcon = tcon,
4940 .server = server,
4941 .offset = wdata->subreq.start,
4942 .length = wdata->subreq.len,
4943 .persistent_fid = wdata->req->cfile->fid.persistent_fid,
4944 .volatile_fid = wdata->req->cfile->fid.volatile_fid,
4945 .pid = wdata->req->pid,
4946 };
4947 io_parms = &_io_parms;
4948
4949 rc = smb2_plain_req_init(SMB2_WRITE, tcon, server,
4950 (void **) &req, &total_len);
4951 if (rc)
4952 goto out;
4953
4954 rqst.rq_iov = iov;
4955 rqst.rq_iter = wdata->subreq.io_iter;
4956
4957 rqst.rq_iov[0].iov_len = total_len - 1;
4958 rqst.rq_iov[0].iov_base = (char *)req;
4959 rqst.rq_nvec += 1;
4960
4961 if (smb3_encryption_required(tcon))
4962 flags |= CIFS_TRANSFORM_REQ;
4963
4964 shdr = (struct smb2_hdr *)req;
4965 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
4966
4967 req->PersistentFileId = io_parms->persistent_fid;
4968 req->VolatileFileId = io_parms->volatile_fid;
4969 req->WriteChannelInfoOffset = 0;
4970 req->WriteChannelInfoLength = 0;
4971 req->Channel = SMB2_CHANNEL_NONE;
4972 req->Length = cpu_to_le32(io_parms->length);
4973 req->Offset = cpu_to_le64(io_parms->offset);
4974 req->DataOffset = cpu_to_le16(
4975 offsetof(struct smb2_write_req, Buffer));
4976 req->RemainingBytes = 0;
4977
4978 trace_smb3_write_enter(wdata->rreq->debug_id,
4979 wdata->subreq.debug_index,
4980 wdata->xid,
4981 io_parms->persistent_fid,
4982 io_parms->tcon->tid,
4983 io_parms->tcon->ses->Suid,
4984 io_parms->offset,
4985 io_parms->length);
4986
4987 #ifdef CONFIG_CIFS_SMB_DIRECT
4988 /*
4989 * If we want to do a server RDMA read, fill in and append
4990 * smbd_buffer_descriptor_v1 to the end of the write request
4991 */
4992 if (smb3_use_rdma_offload(io_parms)) {
4993 struct smbd_buffer_descriptor_v1 *v1;
4994 bool need_invalidate = server->dialect == SMB30_PROT_ID;
4995
4996 wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->subreq.io_iter,
4997 false, need_invalidate);
4998 if (!wdata->mr) {
4999 rc = -EAGAIN;
5000 goto async_writev_out;
5001 }
5002 /* For RDMA read, I/O size is in RemainingBytes not in Length */
5003 req->RemainingBytes = req->Length;
5004 req->Length = 0;
5005 req->DataOffset = 0;
5006 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
5007 if (need_invalidate)
5008 req->Channel = SMB2_CHANNEL_RDMA_V1;
5009 req->WriteChannelInfoOffset =
5010 cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
5011 req->WriteChannelInfoLength =
5012 cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
5013 v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
5014 v1->offset = cpu_to_le64(wdata->mr->mr->iova);
5015 v1->token = cpu_to_le32(wdata->mr->mr->rkey);
5016 v1->length = cpu_to_le32(wdata->mr->mr->length);
5017
5018 rqst.rq_iov[0].iov_len += sizeof(*v1);
5019
5020 /*
5021 * We keep wdata->subreq.io_iter,
5022 * but we have to truncate rqst.rq_iter
5023 */
5024 iov_iter_truncate(&rqst.rq_iter, 0);
5025 }
5026 #endif
5027
5028 if (test_bit(NETFS_SREQ_RETRYING, &wdata->subreq.flags))
5029 smb2_set_replay(server, &rqst);
5030
5031 cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n",
5032 io_parms->offset, io_parms->length, iov_iter_count(&wdata->subreq.io_iter));
5033
5034 if (wdata->credits.value > 0) {
5035 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->subreq.len,
5036 SMB2_MAX_BUFFER_SIZE));
5037 credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
5038 if (server->credits >= server->max_credits)
5039 shdr->CreditRequest = cpu_to_le16(0);
5040 else
5041 shdr->CreditRequest = cpu_to_le16(
5042 min_t(int, server->max_credits -
5043 server->credits, credit_request));
5044
5045 rc = adjust_credits(server, wdata, cifs_trace_rw_credits_call_writev_adjust);
5046 if (rc)
5047 goto async_writev_out;
5048
5049 flags |= CIFS_HAS_CREDITS;
5050 }
5051
5052 /* XXX: compression + encryption is unsupported for now */
5053 if (((flags & CIFS_TRANSFORM_REQ) != CIFS_TRANSFORM_REQ) && should_compress(tcon, &rqst))
5054 flags |= CIFS_COMPRESS_REQ;
5055
5056 rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL,
5057 wdata, flags, &wdata->credits);
5058 /* Can't touch wdata if rc == 0 */
5059 if (rc) {
5060 trace_smb3_write_err(wdata->rreq->debug_id,
5061 wdata->subreq.debug_index,
5062 xid,
5063 io_parms->persistent_fid,
5064 io_parms->tcon->tid,
5065 io_parms->tcon->ses->Suid,
5066 io_parms->offset,
5067 io_parms->length,
5068 rc);
5069 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
5070 }
5071
5072 async_writev_out:
5073 cifs_small_buf_release(req);
5074 out:
5075 if (rc) {
5076 trace_smb3_rw_credits(wdata->rreq->debug_id,
5077 wdata->subreq.debug_index,
5078 wdata->credits.value,
5079 server->credits, server->in_flight,
5080 -(int)wdata->credits.value,
5081 cifs_trace_rw_credits_write_response_clear);
5082 add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
5083 cifs_write_subrequest_terminated(wdata, rc, true);
5084 }
5085 }
5086
5087 /*
5088 * SMB2_write function gets an iov pointer to a kvec array, with n_vec (which
5089 * must be at least 1) giving the number of elements with data to write; those
5090 * elements begin at position 1 in the iov array (iov[0] is reserved for the
5091 * request header). The total data length is specified by io_parms->length.
5092 */
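/*
 * Minimal caller sketch (illustrative only): data kvecs start at index 1 and
 * n_vec counts only those data elements, e.g.
 *
 *	struct kvec iov[2];
 *
 *	iov[1].iov_base = data;
 *	iov[1].iov_len  = len;
 *	io_parms.length = len;
 *	rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1);
 */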
5093 int
5094 SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
5095 unsigned int *nbytes, struct kvec *iov, int n_vec)
5096 {
5097 struct smb_rqst rqst;
5098 int rc = 0;
5099 struct smb2_write_req *req = NULL;
5100 struct smb2_write_rsp *rsp = NULL;
5101 int resp_buftype;
5102 struct kvec rsp_iov;
5103 int flags = 0;
5104 unsigned int total_len;
5105 struct TCP_Server_Info *server;
5106 int retries = 0, cur_sleep = 1;
5107
5108 replay_again:
5109 /* reinitialize for possible replay */
5110 flags = 0;
5111 *nbytes = 0;
5112 if (!io_parms->server)
5113 io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
5114 server = io_parms->server;
5115 if (server == NULL)
5116 return -ECONNABORTED;
5117
5118 if (n_vec < 1)
5119 return rc;
5120
5121 rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
5122 (void **) &req, &total_len);
5123 if (rc)
5124 return rc;
5125
5126 if (smb3_encryption_required(io_parms->tcon))
5127 flags |= CIFS_TRANSFORM_REQ;
5128
5129 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
5130
5131 req->PersistentFileId = io_parms->persistent_fid;
5132 req->VolatileFileId = io_parms->volatile_fid;
5133 req->WriteChannelInfoOffset = 0;
5134 req->WriteChannelInfoLength = 0;
5135 req->Channel = 0;
5136 req->Length = cpu_to_le32(io_parms->length);
5137 req->Offset = cpu_to_le64(io_parms->offset);
5138 req->DataOffset = cpu_to_le16(
5139 offsetof(struct smb2_write_req, Buffer));
5140 req->RemainingBytes = 0;
5141
5142 trace_smb3_write_enter(0, 0, xid, io_parms->persistent_fid,
5143 io_parms->tcon->tid, io_parms->tcon->ses->Suid,
5144 io_parms->offset, io_parms->length);
5145
5146 iov[0].iov_base = (char *)req;
5147 /* 1 for Buffer */
5148 iov[0].iov_len = total_len - 1;
5149
5150 memset(&rqst, 0, sizeof(struct smb_rqst));
5151 rqst.rq_iov = iov;
5152 rqst.rq_nvec = n_vec + 1;
5153
5154 if (retries)
5155 smb2_set_replay(server, &rqst);
5156
5157 rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
5158 &rqst,
5159 &resp_buftype, flags, &rsp_iov);
5160 rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
5161
5162 if (rc) {
5163 trace_smb3_write_err(0, 0, xid,
5164 req->PersistentFileId,
5165 io_parms->tcon->tid,
5166 io_parms->tcon->ses->Suid,
5167 io_parms->offset, io_parms->length, rc);
5168 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
5169 cifs_dbg(VFS, "Send error in write = %d\n", rc);
5170 } else {
5171 *nbytes = le32_to_cpu(rsp->DataLength);
5172 trace_smb3_write_done(0, 0, xid,
5173 req->PersistentFileId,
5174 io_parms->tcon->tid,
5175 io_parms->tcon->ses->Suid,
5176 io_parms->offset, *nbytes);
5177 }
5178
5179 cifs_small_buf_release(req);
5180 free_rsp_buf(resp_buftype, rsp);
5181
5182 if (is_replayable_error(rc) &&
5183 smb2_should_replay(io_parms->tcon, &retries, &cur_sleep))
5184 goto replay_again;
5185
5186 return rc;
5187 }
5188
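/*
 * Illustrative sketch, not part of the driver: how a caller lays out the
 * kvec array for SMB2_write() per the comment above.  iov[0] is left for
 * the request header that SMB2_write() fills in itself; data buffers go in
 * iov[1] onward and n_vec counts only the data buffers.  The helper name
 * and the single-buffer case are assumptions for the example.
 */
static int __maybe_unused example_smb2_write_one_buf(const unsigned int xid,
						     struct cifs_io_parms *io_parms,
						     void *buf, unsigned int len,
						     unsigned int *written)
{
	struct kvec iov[2];

	/* iov[0] is set up by SMB2_write() with the smb2_write_req header */
	iov[1].iov_base = buf;
	iov[1].iov_len = len;

	io_parms->length = len;		/* total bytes carried by the data kvecs */

	return SMB2_write(xid, io_parms, written, iov, 1);
}
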
5189 int posix_info_sid_size(const void *beg, const void *end)
5190 {
5191 size_t subauth;
5192 int total;
5193
5194 if (beg + 2 > end) /* need at least the revision and sub-auth count bytes */
5195 return -1;
5196
5197 subauth = *(u8 *)(beg+1);
5198 if (subauth < 1 || subauth > 15)
5199 return -1;
5200
5201 total = 1 + 1 + 6 + 4*subauth;
5202 if (beg + total > end)
5203 return -1;
5204
5205 return total;
5206 }
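
/*
 * Illustrative sketch, not called anywhere: the wire size computed by
 * posix_info_sid_size() above is 1 byte of revision, 1 byte of
 * sub-authority count, a 6 byte identifier authority, then 4 bytes per
 * sub-authority.  The helper name is made up for the example.
 */
static inline int example_sid_wire_size(u8 num_subauth)
{
	return 1 + 1 + 6 + 4 * (int)num_subauth;	/* e.g. 28 bytes for 5 sub-authorities */
}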
5207
5208 int posix_info_parse(const void *beg, const void *end,
5209 struct smb2_posix_info_parsed *out)
5210
5211 {
5212 int total_len = 0;
5213 int owner_len, group_len;
5214 int name_len;
5215 const void *owner_sid;
5216 const void *group_sid;
5217 const void *name;
5218
5219 /* if no end bound given, assume payload to be correct */
5220 if (!end) {
5221 const struct smb2_posix_info *p = beg;
5222
5223 end = beg + le32_to_cpu(p->NextEntryOffset);
5224 /* last element will have a 0 offset, pick a sensible bound */
5225 if (end == beg)
5226 end += 0xFFFF;
5227 }
5228
5229 /* check base buf */
5230 if (beg + sizeof(struct smb2_posix_info) > end)
5231 return -1;
5232 total_len = sizeof(struct smb2_posix_info);
5233
5234 /* check owner sid */
5235 owner_sid = beg + total_len;
5236 owner_len = posix_info_sid_size(owner_sid, end);
5237 if (owner_len < 0)
5238 return -1;
5239 total_len += owner_len;
5240
5241 /* check group sid */
5242 group_sid = beg + total_len;
5243 group_len = posix_info_sid_size(group_sid, end);
5244 if (group_len < 0)
5245 return -1;
5246 total_len += group_len;
5247
5248 /* check name len */
5249 if (beg + total_len + 4 > end)
5250 return -1;
5251 name_len = le32_to_cpu(*(__le32 *)(beg + total_len));
5252 if (name_len < 1 || name_len > 0xFFFF)
5253 return -1;
5254 total_len += 4;
5255
5256 /* check name */
5257 name = beg + total_len;
5258 if (name + name_len > end)
5259 return -1;
5260 total_len += name_len;
5261
5262 if (out) {
5263 out->base = beg;
5264 out->size = total_len;
5265 out->name_len = name_len;
5266 out->name = name;
5267 memcpy(&out->owner, owner_sid, owner_len);
5268 memcpy(&out->group, group_sid, group_len);
5269 }
5270 return total_len;
5271 }
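
/*
 * Illustrative sketch, not part of the driver: walking a buffer of
 * SMB_FIND_FILE_POSIX_INFO entries with posix_info_parse().  Each entry is
 * variable sized (fixed header + owner SID + group SID + name), and the
 * last entry carries a zero NextEntryOffset.  The callback type and helper
 * name are assumptions for the example.
 */
static int __maybe_unused
example_walk_posix_entries(const void *buf, const void *end,
			   void (*cb)(const struct smb2_posix_info_parsed *parsed))
{
	struct smb2_posix_info_parsed parsed;
	int count = 0;

	while (buf < end) {
		const struct smb2_posix_info *p = buf;
		u32 next;

		if (posix_info_parse(buf, end, &parsed) < 0)
			break;			/* malformed entry, stop walking */
		cb(&parsed);
		count++;

		next = le32_to_cpu(p->NextEntryOffset);
		if (!next)			/* last entry in the buffer */
			break;
		buf += next;
	}
	return count;
}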
5272
5273 static int posix_info_extra_size(const void *beg, const void *end)
5274 {
5275 int len = posix_info_parse(beg, end, NULL);
5276
5277 if (len < 0)
5278 return -1;
5279 return len - sizeof(struct smb2_posix_info);
5280 }
5281
5282 static unsigned int
5283 num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry,
5284 size_t size)
5285 {
5286 int len;
5287 unsigned int entrycount = 0;
5288 unsigned int next_offset = 0;
5289 char *entryptr;
5290 FILE_DIRECTORY_INFO *dir_info;
5291
5292 if (bufstart == NULL)
5293 return 0;
5294
5295 entryptr = bufstart;
5296
5297 while (1) {
5298 if (entryptr + next_offset < entryptr ||
5299 entryptr + next_offset > end_of_buf ||
5300 entryptr + next_offset + size > end_of_buf) {
5301 cifs_dbg(VFS, "malformed search entry would overflow\n");
5302 break;
5303 }
5304
5305 entryptr = entryptr + next_offset;
5306 dir_info = (FILE_DIRECTORY_INFO *)entryptr;
5307
5308 if (infotype == SMB_FIND_FILE_POSIX_INFO)
5309 len = posix_info_extra_size(entryptr, end_of_buf);
5310 else
5311 len = le32_to_cpu(dir_info->FileNameLength);
5312
5313 if (len < 0 ||
5314 entryptr + len < entryptr ||
5315 entryptr + len > end_of_buf ||
5316 entryptr + len + size > end_of_buf) {
5317 cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
5318 end_of_buf);
5319 break;
5320 }
5321
5322 *lastentry = entryptr;
5323 entrycount++;
5324
5325 next_offset = le32_to_cpu(dir_info->NextEntryOffset);
5326 if (!next_offset)
5327 break;
5328 }
5329
5330 return entrycount;
5331 }
5332
5333 /*
5334 * Readdir/FindFirst
5335 */
5336 int SMB2_query_directory_init(const unsigned int xid,
5337 struct cifs_tcon *tcon,
5338 struct TCP_Server_Info *server,
5339 struct smb_rqst *rqst,
5340 u64 persistent_fid, u64 volatile_fid,
5341 int index, int info_level)
5342 {
5343 struct smb2_query_directory_req *req;
5344 unsigned char *bufptr;
5345 __le16 asterisk = cpu_to_le16('*');
5346 unsigned int output_size = CIFSMaxBufSize -
5347 MAX_SMB2_CREATE_RESPONSE_SIZE -
5348 MAX_SMB2_CLOSE_RESPONSE_SIZE;
5349 unsigned int total_len;
5350 struct kvec *iov = rqst->rq_iov;
5351 int len, rc;
5352
5353 rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server,
5354 (void **) &req, &total_len);
5355 if (rc)
5356 return rc;
5357
5358 switch (info_level) {
5359 case SMB_FIND_FILE_DIRECTORY_INFO:
5360 req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
5361 break;
5362 case SMB_FIND_FILE_ID_FULL_DIR_INFO:
5363 req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
5364 break;
5365 case SMB_FIND_FILE_POSIX_INFO:
5366 req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO;
5367 break;
5368 case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
5369 req->FileInformationClass = FILE_FULL_DIRECTORY_INFORMATION;
5370 break;
5371 default:
5372 cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
5373 info_level);
5374 return -EINVAL;
5375 }
5376
5377 req->FileIndex = cpu_to_le32(index);
5378 req->PersistentFileId = persistent_fid;
5379 req->VolatileFileId = volatile_fid;
5380
5381 len = 0x2; /* length in bytes of the UTF-16 "*" search pattern */
5382 bufptr = req->Buffer;
5383 memcpy(bufptr, &asterisk, len);
5384
5385 req->FileNameOffset =
5386 cpu_to_le16(sizeof(struct smb2_query_directory_req));
5387 req->FileNameLength = cpu_to_le16(len);
5388 /*
5389 * BB could be 30 bytes or so longer if we used SMB2 specific
5390 * buffer lengths, but this is safe and close enough.
5391 */
5392 output_size = min_t(unsigned int, output_size, server->maxBuf);
5393 output_size = min_t(unsigned int, output_size, 2 << 15);
5394 req->OutputBufferLength = cpu_to_le32(output_size);
5395
5396 iov[0].iov_base = (char *)req;
5397 /* 1 for Buffer */
5398 iov[0].iov_len = total_len - 1;
5399
5400 iov[1].iov_base = (char *)(req->Buffer);
5401 iov[1].iov_len = len;
5402
5403 trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid,
5404 tcon->ses->Suid, index, output_size);
5405
5406 return 0;
5407 }
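
/*
 * Illustrative sketch, not part of the driver: why iov[0].iov_len is set to
 * total_len - 1 above.  The request structure ends in a one byte Buffer[]
 * placeholder that smb2_plain_req_init() counts in total_len; the real
 * variable length payload (here the UTF-16 "*" pattern) is sent from
 * iov[1], so the placeholder byte is trimmed from iov[0] to avoid sending
 * it twice.  The helper and its parameters are made up for the example.
 */
static inline void example_split_req_iovs(struct kvec *iov, void *req,
					  unsigned int total_len,
					  void *payload, unsigned int payload_len)
{
	iov[0].iov_base = req;
	iov[0].iov_len = total_len - 1;	/* drop the Buffer[1] placeholder byte */
	iov[1].iov_base = payload;
	iov[1].iov_len = payload_len;
}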
5408
5409 void SMB2_query_directory_free(struct smb_rqst *rqst)
5410 {
5411 if (rqst && rqst->rq_iov) {
5412 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
5413 }
5414 }
5415
5416 int
5417 smb2_parse_query_directory(struct cifs_tcon *tcon,
5418 struct kvec *rsp_iov,
5419 int resp_buftype,
5420 struct cifs_search_info *srch_inf)
5421 {
5422 struct smb2_query_directory_rsp *rsp;
5423 size_t info_buf_size;
5424 char *end_of_smb;
5425 int rc;
5426
5427 rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base;
5428
5429 switch (srch_inf->info_level) {
5430 case SMB_FIND_FILE_DIRECTORY_INFO:
5431 info_buf_size = sizeof(FILE_DIRECTORY_INFO);
5432 break;
5433 case SMB_FIND_FILE_ID_FULL_DIR_INFO:
5434 info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO);
5435 break;
5436 case SMB_FIND_FILE_POSIX_INFO:
5437 /* note that posix payloads are variable size */
5438 info_buf_size = sizeof(struct smb2_posix_info);
5439 break;
5440 case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
5441 info_buf_size = sizeof(FILE_FULL_DIRECTORY_INFO);
5442 break;
5443 default:
5444 cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
5445 srch_inf->info_level);
5446 return -EINVAL;
5447 }
5448
5449 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
5450 le32_to_cpu(rsp->OutputBufferLength), rsp_iov,
5451 info_buf_size);
5452 if (rc) {
5453 cifs_tcon_dbg(VFS, "bad info payload\n");
5454 return rc;
5455 }
5456
5457 srch_inf->unicode = true;
5458
5459 if (srch_inf->ntwrk_buf_start) {
5460 if (srch_inf->smallBuf)
5461 cifs_small_buf_release(srch_inf->ntwrk_buf_start);
5462 else
5463 cifs_buf_release(srch_inf->ntwrk_buf_start);
5464 }
5465 srch_inf->ntwrk_buf_start = (char *)rsp;
5466 srch_inf->srch_entries_start = srch_inf->last_entry =
5467 (char *)rsp + le16_to_cpu(rsp->OutputBufferOffset);
5468 end_of_smb = rsp_iov->iov_len + (char *)rsp;
5469
5470 srch_inf->entries_in_buffer = num_entries(
5471 srch_inf->info_level,
5472 srch_inf->srch_entries_start,
5473 end_of_smb,
5474 &srch_inf->last_entry,
5475 info_buf_size);
5476
5477 srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
5478 cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
5479 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
5480 srch_inf->srch_entries_start, srch_inf->last_entry);
5481 if (resp_buftype == CIFS_LARGE_BUFFER)
5482 srch_inf->smallBuf = false;
5483 else if (resp_buftype == CIFS_SMALL_BUFFER)
5484 srch_inf->smallBuf = true;
5485 else
5486 cifs_tcon_dbg(VFS, "Invalid search buffer type\n");
5487
5488 return 0;
5489 }
5490
5491 int
5492 SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
5493 u64 persistent_fid, u64 volatile_fid, int index,
5494 struct cifs_search_info *srch_inf)
5495 {
5496 struct smb_rqst rqst;
5497 struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
5498 struct smb2_query_directory_rsp *rsp = NULL;
5499 int resp_buftype = CIFS_NO_BUFFER;
5500 struct kvec rsp_iov;
5501 int rc = 0;
5502 struct cifs_ses *ses = tcon->ses;
5503 struct TCP_Server_Info *server;
5504 int flags = 0;
5505 int retries = 0, cur_sleep = 1;
5506
5507 replay_again:
5508 /* reinitialize for possible replay */
5509 flags = 0;
5510 server = cifs_pick_channel(ses);
5511
5512 if (!ses || !(ses->server))
5513 return -EIO;
5514
5515 if (smb3_encryption_required(tcon))
5516 flags |= CIFS_TRANSFORM_REQ;
5517
5518 memset(&rqst, 0, sizeof(struct smb_rqst));
5519 memset(&iov, 0, sizeof(iov));
5520 rqst.rq_iov = iov;
5521 rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
5522
5523 rc = SMB2_query_directory_init(xid, tcon, server,
5524 &rqst, persistent_fid,
5525 volatile_fid, index,
5526 srch_inf->info_level);
5527 if (rc)
5528 goto qdir_exit;
5529
5530 if (retries)
5531 smb2_set_replay(server, &rqst);
5532
5533 rc = cifs_send_recv(xid, ses, server,
5534 &rqst, &resp_buftype, flags, &rsp_iov);
5535 rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
5536
5537 if (rc) {
5538 if (rc == -ENODATA &&
5539 rsp->hdr.Status == STATUS_NO_MORE_FILES) {
5540 trace_smb3_query_dir_done(xid, persistent_fid,
5541 tcon->tid, tcon->ses->Suid, index, 0);
5542 srch_inf->endOfSearch = true;
5543 rc = 0;
5544 } else {
5545 trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
5546 tcon->ses->Suid, index, 0, rc);
5547 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
5548 }
5549 goto qdir_exit;
5550 }
5551
5552 rc = smb2_parse_query_directory(tcon, &rsp_iov, resp_buftype,
5553 srch_inf);
5554 if (rc) {
5555 trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
5556 tcon->ses->Suid, index, 0, rc);
5557 goto qdir_exit;
5558 }
5559 resp_buftype = CIFS_NO_BUFFER; /* ownership of rsp passed to srch_inf, don't free it below */
5560
5561 trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid,
5562 tcon->ses->Suid, index, srch_inf->entries_in_buffer);
5563
5564 qdir_exit:
5565 SMB2_query_directory_free(&rqst);
5566 free_rsp_buf(resp_buftype, rsp);
5567
5568 if (is_replayable_error(rc) &&
5569 smb2_should_replay(tcon, &retries, &cur_sleep))
5570 goto replay_again;
5571
5572 return rc;
5573 }
5574
5575 int
5576 SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
5577 struct smb_rqst *rqst,
5578 u64 persistent_fid, u64 volatile_fid, u32 pid,
5579 u8 info_class, u8 info_type, u32 additional_info,
5580 void **data, unsigned int *size)
5581 {
5582 struct smb2_set_info_req *req;
5583 struct kvec *iov = rqst->rq_iov;
5584 unsigned int i, total_len;
5585 int rc;
5586
5587 rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server,
5588 (void **) &req, &total_len);
5589 if (rc)
5590 return rc;
5591
5592 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
5593 req->InfoType = info_type;
5594 req->FileInfoClass = info_class;
5595 req->PersistentFileId = persistent_fid;
5596 req->VolatileFileId = volatile_fid;
5597 req->AdditionalInformation = cpu_to_le32(additional_info);
5598
5599 req->BufferOffset = cpu_to_le16(sizeof(struct smb2_set_info_req));
5600 req->BufferLength = cpu_to_le32(*size);
5601
5602 memcpy(req->Buffer, *data, *size);
5603 total_len += *size;
5604
5605 iov[0].iov_base = (char *)req;
5606 /* 1 for Buffer */
5607 iov[0].iov_len = total_len - 1;
5608
5609 for (i = 1; i < rqst->rq_nvec; i++) {
5610 le32_add_cpu(&req->BufferLength, size[i]);
5611 iov[i].iov_base = (char *)data[i];
5612 iov[i].iov_len = size[i];
5613 }
5614
5615 return 0;
5616 }
5617
5618 void
5619 SMB2_set_info_free(struct smb_rqst *rqst)
5620 {
5621 if (rqst && rqst->rq_iov)
5622 cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
5623 }
5624
5625 static int
5626 send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
5627 u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
5628 u8 info_type, u32 additional_info, unsigned int num,
5629 void **data, unsigned int *size)
5630 {
5631 struct smb_rqst rqst;
5632 struct smb2_set_info_rsp *rsp = NULL;
5633 struct kvec *iov;
5634 struct kvec rsp_iov;
5635 int rc = 0;
5636 int resp_buftype;
5637 struct cifs_ses *ses = tcon->ses;
5638 struct TCP_Server_Info *server;
5639 int flags = 0;
5640 int retries = 0, cur_sleep = 1;
5641
5642 replay_again:
5643 /* reinitialize for possible replay */
5644 flags = 0;
5645 server = cifs_pick_channel(ses);
5646
5647 if (!ses || !server)
5648 return -EIO;
5649
5650 if (!num)
5651 return -EINVAL;
5652
5653 if (smb3_encryption_required(tcon))
5654 flags |= CIFS_TRANSFORM_REQ;
5655
5656 iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
5657 if (!iov)
5658 return -ENOMEM;
5659
5660 memset(&rqst, 0, sizeof(struct smb_rqst));
5661 rqst.rq_iov = iov;
5662 rqst.rq_nvec = num;
5663
5664 rc = SMB2_set_info_init(tcon, server,
5665 &rqst, persistent_fid, volatile_fid, pid,
5666 info_class, info_type, additional_info,
5667 data, size);
5668 if (rc) {
5669 kfree(iov);
5670 return rc;
5671 }
5672
5673 if (retries)
5674 smb2_set_replay(server, &rqst);
5675
5676 rc = cifs_send_recv(xid, ses, server,
5677 &rqst, &resp_buftype, flags,
5678 &rsp_iov);
5679 SMB2_set_info_free(&rqst);
5680 rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
5681
5682 if (rc != 0) {
5683 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
5684 trace_smb3_set_info_err(xid, persistent_fid, tcon->tid,
5685 ses->Suid, info_class, (__u32)info_type, rc);
5686 }
5687
5688 free_rsp_buf(resp_buftype, rsp);
5689 kfree(iov);
5690
5691 if (is_replayable_error(rc) &&
5692 smb2_should_replay(tcon, &retries, &cur_sleep))
5693 goto replay_again;
5694
5695 return rc;
5696 }
5697
5698 int
5699 SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
5700 u64 volatile_fid, u32 pid, loff_t new_eof)
5701 {
5702 struct smb2_file_eof_info info;
5703 void *data;
5704 unsigned int size;
5705
5706 info.EndOfFile = cpu_to_le64(new_eof);
5707
5708 data = &info;
5709 size = sizeof(struct smb2_file_eof_info);
5710
5711 trace_smb3_set_eof(xid, persistent_fid, tcon->tid, tcon->ses->Suid, new_eof);
5712
5713 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
5714 pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
5715 0, 1, &data, &size);
5716 }
5717
5718 int
5719 SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
5720 u64 persistent_fid, u64 volatile_fid,
5721 struct smb_ntsd *pnntsd, int pacllen, int aclflag)
5722 {
5723 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
5724 current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag,
5725 1, (void **)&pnntsd, &pacllen);
5726 }
5727
5728 int
5729 SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
5730 u64 persistent_fid, u64 volatile_fid,
5731 struct smb2_file_full_ea_info *buf, int len)
5732 {
5733 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
5734 current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE,
5735 0, 1, (void **)&buf, &len);
5736 }
5737
5738 int
5739 SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
5740 const u64 persistent_fid, const u64 volatile_fid,
5741 __u8 oplock_level)
5742 {
5743 struct smb_rqst rqst;
5744 int rc;
5745 struct smb2_oplock_break *req = NULL;
5746 struct cifs_ses *ses = tcon->ses;
5747 struct TCP_Server_Info *server;
5748 int flags = CIFS_OBREAK_OP;
5749 unsigned int total_len;
5750 struct kvec iov[1];
5751 struct kvec rsp_iov;
5752 int resp_buf_type;
5753 int retries = 0, cur_sleep = 1;
5754
5755 replay_again:
5756 /* reinitialize for possible replay */
5757 flags = CIFS_OBREAK_OP;
5758 server = cifs_pick_channel(ses);
5759
5760 cifs_dbg(FYI, "SMB2_oplock_break\n");
5761 rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
5762 (void **) &req, &total_len);
5763 if (rc)
5764 return rc;
5765
5766 if (smb3_encryption_required(tcon))
5767 flags |= CIFS_TRANSFORM_REQ;
5768
5769 req->VolatileFid = volatile_fid;
5770 req->PersistentFid = persistent_fid;
5771 req->OplockLevel = oplock_level;
5772 req->hdr.CreditRequest = cpu_to_le16(1);
5773
5774 flags |= CIFS_NO_RSP_BUF;
5775
5776 iov[0].iov_base = (char *)req;
5777 iov[0].iov_len = total_len;
5778
5779 memset(&rqst, 0, sizeof(struct smb_rqst));
5780 rqst.rq_iov = iov;
5781 rqst.rq_nvec = 1;
5782
5783 if (retries)
5784 smb2_set_replay(server, &rqst);
5785
5786 rc = cifs_send_recv(xid, ses, server,
5787 &rqst, &resp_buf_type, flags, &rsp_iov);
5788 cifs_small_buf_release(req);
5789 if (rc) {
5790 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
5791 cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
5792 }
5793
5794 if (is_replayable_error(rc) &&
5795 smb2_should_replay(tcon, &retries, &cur_sleep))
5796 goto replay_again;
5797
5798 return rc;
5799 }
5800
5801 void
5802 smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
5803 struct kstatfs *kst)
5804 {
5805 kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
5806 le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
5807 kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
5808 kst->f_bfree = kst->f_bavail =
5809 le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
5810 return;
5811 }
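
/*
 * Illustrative sketch, not called by the code above: the f_bsize reported
 * to statfs is derived from the server's sector geometry, so a share that
 * reports 512 bytes per sector and 8 sectors per allocation unit yields a
 * 4096 byte block size.  The helper name is made up for the example.
 */
static inline u64 example_fs_block_size(u32 bytes_per_sector, u32 sectors_per_au)
{
	return (u64)bytes_per_sector * sectors_per_au;	/* e.g. 512 * 8 = 4096 */
}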
5812
5813 static void
5814 copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
5815 struct kstatfs *kst)
5816 {
5817 kst->f_bsize = le32_to_cpu(response_data->BlockSize);
5818 kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
5819 kst->f_bfree = le64_to_cpu(response_data->BlocksAvail);
5820 if (response_data->UserBlocksAvail == cpu_to_le64(-1))
5821 kst->f_bavail = kst->f_bfree;
5822 else
5823 kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
5824 if (response_data->TotalFileNodes != cpu_to_le64(-1))
5825 kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
5826 if (response_data->FreeFileNodes != cpu_to_le64(-1))
5827 kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);
5828
5829 return;
5830 }
5831
5832 static int
5833 build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
5834 struct TCP_Server_Info *server,
5835 int level, int outbuf_len, u64 persistent_fid,
5836 u64 volatile_fid)
5837 {
5838 int rc;
5839 struct smb2_query_info_req *req;
5840 unsigned int total_len;
5841
5842 cifs_dbg(FYI, "Query FSInfo level %d\n", level);
5843
5844 if ((tcon->ses == NULL) || server == NULL)
5845 return -EIO;
5846
5847 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
5848 (void **) &req, &total_len);
5849 if (rc)
5850 return rc;
5851
5852 req->InfoType = SMB2_O_INFO_FILESYSTEM;
5853 req->FileInfoClass = level;
5854 req->PersistentFileId = persistent_fid;
5855 req->VolatileFileId = volatile_fid;
5856 /* 1 for pad */
5857 req->InputBufferOffset =
5858 cpu_to_le16(sizeof(struct smb2_query_info_req));
5859 req->OutputBufferLength = cpu_to_le32(
5860 outbuf_len + sizeof(struct smb2_query_info_rsp));
5861
5862 iov->iov_base = (char *)req;
5863 iov->iov_len = total_len;
5864 return 0;
5865 }
5866
5867 static inline void free_qfs_info_req(struct kvec *iov)
5868 {
5869 cifs_buf_release(iov->iov_base);
5870 }
5871
5872 int
5873 SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
5874 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
5875 {
5876 struct smb_rqst rqst;
5877 struct smb2_query_info_rsp *rsp = NULL;
5878 struct kvec iov;
5879 struct kvec rsp_iov;
5880 int rc = 0;
5881 int resp_buftype;
5882 struct cifs_ses *ses = tcon->ses;
5883 struct TCP_Server_Info *server;
5884 FILE_SYSTEM_POSIX_INFO *info = NULL;
5885 int flags = 0;
5886 int retries = 0, cur_sleep = 1;
5887
5888 replay_again:
5889 /* reinitialize for possible replay */
5890 flags = 0;
5891 server = cifs_pick_channel(ses);
5892
5893 rc = build_qfs_info_req(&iov, tcon, server,
5894 FS_POSIX_INFORMATION,
5895 sizeof(FILE_SYSTEM_POSIX_INFO),
5896 persistent_fid, volatile_fid);
5897 if (rc)
5898 return rc;
5899
5900 if (smb3_encryption_required(tcon))
5901 flags |= CIFS_TRANSFORM_REQ;
5902
5903 memset(&rqst, 0, sizeof(struct smb_rqst));
5904 rqst.rq_iov = &iov;
5905 rqst.rq_nvec = 1;
5906
5907 if (retries)
5908 smb2_set_replay(server, &rqst);
5909
5910 rc = cifs_send_recv(xid, ses, server,
5911 &rqst, &resp_buftype, flags, &rsp_iov);
5912 free_qfs_info_req(&iov);
5913 if (rc) {
5914 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
5915 goto posix_qfsinf_exit;
5916 }
5917 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
5918
5919 info = (FILE_SYSTEM_POSIX_INFO *)(
5920 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
5921 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
5922 le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
5923 sizeof(FILE_SYSTEM_POSIX_INFO));
5924 if (!rc)
5925 copy_posix_fs_info_to_kstatfs(info, fsdata);
5926
5927 posix_qfsinf_exit:
5928 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
5929
5930 if (is_replayable_error(rc) &&
5931 smb2_should_replay(tcon, &retries, &cur_sleep))
5932 goto replay_again;
5933
5934 return rc;
5935 }
5936
5937 int
5938 SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
5939 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
5940 {
5941 struct smb_rqst rqst;
5942 struct smb2_query_info_rsp *rsp = NULL;
5943 struct kvec iov;
5944 struct kvec rsp_iov;
5945 int rc = 0;
5946 int resp_buftype;
5947 struct cifs_ses *ses = tcon->ses;
5948 struct TCP_Server_Info *server;
5949 struct smb2_fs_full_size_info *info = NULL;
5950 int flags = 0;
5951 int retries = 0, cur_sleep = 1;
5952
5953 replay_again:
5954 /* reinitialize for possible replay */
5955 flags = 0;
5956 server = cifs_pick_channel(ses);
5957
5958 rc = build_qfs_info_req(&iov, tcon, server,
5959 FS_FULL_SIZE_INFORMATION,
5960 sizeof(struct smb2_fs_full_size_info),
5961 persistent_fid, volatile_fid);
5962 if (rc)
5963 return rc;
5964
5965 if (smb3_encryption_required(tcon))
5966 flags |= CIFS_TRANSFORM_REQ;
5967
5968 memset(&rqst, 0, sizeof(struct smb_rqst));
5969 rqst.rq_iov = &iov;
5970 rqst.rq_nvec = 1;
5971
5972 if (retries)
5973 smb2_set_replay(server, &rqst);
5974
5975 rc = cifs_send_recv(xid, ses, server,
5976 &rqst, &resp_buftype, flags, &rsp_iov);
5977 free_qfs_info_req(&iov);
5978 if (rc) {
5979 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
5980 goto qfsinf_exit;
5981 }
5982 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
5983
5984 info = (struct smb2_fs_full_size_info *)(
5985 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
5986 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
5987 le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
5988 sizeof(struct smb2_fs_full_size_info));
5989 if (!rc)
5990 smb2_copy_fs_info_to_kstatfs(info, fsdata);
5991
5992 qfsinf_exit:
5993 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
5994
5995 if (is_replayable_error(rc) &&
5996 smb2_should_replay(tcon, &retries, &cur_sleep))
5997 goto replay_again;
5998
5999 return rc;
6000 }
6001
6002 int
6003 SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
6004 u64 persistent_fid, u64 volatile_fid, int level)
6005 {
6006 struct smb_rqst rqst;
6007 struct smb2_query_info_rsp *rsp = NULL;
6008 struct kvec iov;
6009 struct kvec rsp_iov;
6010 int rc = 0;
6011 int resp_buftype, max_len, min_len;
6012 struct cifs_ses *ses = tcon->ses;
6013 struct TCP_Server_Info *server;
6014 unsigned int rsp_len, offset;
6015 int flags = 0;
6016 int retries = 0, cur_sleep = 1;
6017
6018 replay_again:
6019 /* reinitialize for possible replay */
6020 flags = 0;
6021 server = cifs_pick_channel(ses);
6022
6023 if (level == FS_DEVICE_INFORMATION) {
6024 max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
6025 min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
6026 } else if (level == FS_ATTRIBUTE_INFORMATION) {
6027 max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
6028 min_len = MIN_FS_ATTR_INFO_SIZE;
6029 } else if (level == FS_SECTOR_SIZE_INFORMATION) {
6030 max_len = sizeof(struct smb3_fs_ss_info);
6031 min_len = sizeof(struct smb3_fs_ss_info);
6032 } else if (level == FS_VOLUME_INFORMATION) {
6033 max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
6034 min_len = sizeof(struct smb3_fs_vol_info);
6035 } else {
6036 cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
6037 return -EINVAL;
6038 }
6039
6040 rc = build_qfs_info_req(&iov, tcon, server,
6041 level, max_len,
6042 persistent_fid, volatile_fid);
6043 if (rc)
6044 return rc;
6045
6046 if (smb3_encryption_required(tcon))
6047 flags |= CIFS_TRANSFORM_REQ;
6048
6049 memset(&rqst, 0, sizeof(struct smb_rqst));
6050 rqst.rq_iov = &iov;
6051 rqst.rq_nvec = 1;
6052
6053 if (retries)
6054 smb2_set_replay(server, &rqst);
6055
6056 rc = cifs_send_recv(xid, ses, server,
6057 &rqst, &resp_buftype, flags, &rsp_iov);
6058 free_qfs_info_req(&iov);
6059 if (rc) {
6060 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
6061 goto qfsattr_exit;
6062 }
6063 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
6064
6065 rsp_len = le32_to_cpu(rsp->OutputBufferLength);
6066 offset = le16_to_cpu(rsp->OutputBufferOffset);
6067 rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
6068 if (rc)
6069 goto qfsattr_exit;
6070
6071 if (level == FS_ATTRIBUTE_INFORMATION)
6072 memcpy(&tcon->fsAttrInfo, offset
6073 + (char *)rsp, min_t(unsigned int,
6074 rsp_len, max_len));
6075 else if (level == FS_DEVICE_INFORMATION)
6076 memcpy(&tcon->fsDevInfo, offset
6077 + (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO));
6078 else if (level == FS_SECTOR_SIZE_INFORMATION) {
6079 struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
6080 (offset + (char *)rsp);
6081 tcon->ss_flags = le32_to_cpu(ss_info->Flags);
6082 tcon->perf_sector_size =
6083 le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
6084 } else if (level == FS_VOLUME_INFORMATION) {
6085 struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
6086 (offset + (char *)rsp);
6087 tcon->vol_serial_number = vol_info->VolumeSerialNumber;
6088 tcon->vol_create_time = vol_info->VolumeCreationTime;
6089 }
6090
6091 qfsattr_exit:
6092 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
6093
6094 if (is_replayable_error(rc) &&
6095 smb2_should_replay(tcon, &retries, &cur_sleep))
6096 goto replay_again;
6097
6098 return rc;
6099 }
6100
6101 int
6102 smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
6103 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
6104 const __u32 num_lock, struct smb2_lock_element *buf)
6105 {
6106 struct smb_rqst rqst;
6107 int rc = 0;
6108 struct smb2_lock_req *req = NULL;
6109 struct kvec iov[2];
6110 struct kvec rsp_iov;
6111 int resp_buf_type;
6112 unsigned int count;
6113 int flags = CIFS_NO_RSP_BUF;
6114 unsigned int total_len;
6115 struct TCP_Server_Info *server;
6116 int retries = 0, cur_sleep = 1;
6117
6118 replay_again:
6119 /* reinitialize for possible replay */
6120 flags = CIFS_NO_RSP_BUF;
6121 server = cifs_pick_channel(tcon->ses);
6122
6123 cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
6124
6125 rc = smb2_plain_req_init(SMB2_LOCK, tcon, server,
6126 (void **) &req, &total_len);
6127 if (rc)
6128 return rc;
6129
6130 if (smb3_encryption_required(tcon))
6131 flags |= CIFS_TRANSFORM_REQ;
6132
6133 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
6134 req->LockCount = cpu_to_le16(num_lock);
6135
6136 req->PersistentFileId = persist_fid;
6137 req->VolatileFileId = volatile_fid;
6138
6139 count = num_lock * sizeof(struct smb2_lock_element);
6140
6141 iov[0].iov_base = (char *)req;
6142 iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
6143 iov[1].iov_base = (char *)buf;
6144 iov[1].iov_len = count;
6145
6146 cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
6147
6148 memset(&rqst, 0, sizeof(struct smb_rqst));
6149 rqst.rq_iov = iov;
6150 rqst.rq_nvec = 2;
6151
6152 if (retries)
6153 smb2_set_replay(server, &rqst);
6154
6155 rc = cifs_send_recv(xid, tcon->ses, server,
6156 &rqst, &resp_buf_type, flags,
6157 &rsp_iov);
6158 cifs_small_buf_release(req);
6159 if (rc) {
6160 cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
6161 cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
6162 trace_smb3_lock_err(xid, persist_fid, tcon->tid,
6163 tcon->ses->Suid, rc);
6164 }
6165
6166 if (is_replayable_error(rc) &&
6167 smb2_should_replay(tcon, &retries, &cur_sleep))
6168 goto replay_again;
6169
6170 return rc;
6171 }
6172
6173 int
6174 SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
6175 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
6176 const __u64 length, const __u64 offset, const __u32 lock_flags,
6177 const bool wait)
6178 {
6179 struct smb2_lock_element lock;
6180
6181 lock.Offset = cpu_to_le64(offset);
6182 lock.Length = cpu_to_le64(length);
6183 lock.Flags = cpu_to_le32(lock_flags);
6184 if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
6185 lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
6186
6187 return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
6188 }
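
/*
 * Illustrative sketch, not part of the driver: taking a non-blocking
 * byte-range lock through SMB2_lock().  With wait == false and a lock
 * request that is not an unlock, SMB2_lock() adds
 * SMB2_LOCKFLAG_FAIL_IMMEDIATELY so the server fails the request instead
 * of blocking when the range is contended.  SMB2_LOCKFLAG_EXCLUSIVE_LOCK
 * is assumed to be the exclusive-lock flag from smb2pdu.h; the helper name
 * is made up for the example.
 */
static int __maybe_unused
example_try_exclusive_lock(const unsigned int xid, struct cifs_tcon *tcon,
			   const __u64 persist_fid, const __u64 volatile_fid,
			   const __u32 pid, __u64 offset, __u64 length)
{
	return SMB2_lock(xid, tcon, persist_fid, volatile_fid, pid,
			 length, offset, SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
			 false /* do not wait */);
}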
6189
6190 int
6191 SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
6192 __u8 *lease_key, const __le32 lease_state)
6193 {
6194 struct smb_rqst rqst;
6195 int rc;
6196 struct smb2_lease_ack *req = NULL;
6197 struct cifs_ses *ses = tcon->ses;
6198 int flags = CIFS_OBREAK_OP;
6199 unsigned int total_len;
6200 struct kvec iov[1];
6201 struct kvec rsp_iov;
6202 int resp_buf_type;
6203 __u64 *please_key_high;
6204 __u64 *please_key_low;
6205 struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
6206
6207 cifs_dbg(FYI, "SMB2_lease_break\n");
6208 rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
6209 (void **) &req, &total_len);
6210 if (rc)
6211 return rc;
6212
6213 if (smb3_encryption_required(tcon))
6214 flags |= CIFS_TRANSFORM_REQ;
6215
6216 req->hdr.CreditRequest = cpu_to_le16(1);
6217 req->StructureSize = cpu_to_le16(36);
6218 total_len += 12; /* lease ack body (36) is 12 bytes larger than the oplock break body (24) allocated above */
6219
6220 memcpy(req->LeaseKey, lease_key, 16);
6221 req->LeaseState = lease_state;
6222
6223 flags |= CIFS_NO_RSP_BUF;
6224
6225 iov[0].iov_base = (char *)req;
6226 iov[0].iov_len = total_len;
6227
6228 memset(&rqst, 0, sizeof(struct smb_rqst));
6229 rqst.rq_iov = iov;
6230 rqst.rq_nvec = 1;
6231
6232 rc = cifs_send_recv(xid, ses, server,
6233 &rqst, &resp_buf_type, flags, &rsp_iov);
6234 cifs_small_buf_release(req);
6235
6236 please_key_low = (__u64 *)lease_key;
6237 please_key_high = (__u64 *)(lease_key+8);
6238 if (rc) {
6239 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
6240 trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
6241 ses->Suid, *please_key_low, *please_key_high, rc);
6242 cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
6243 } else
6244 trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
6245 ses->Suid, *please_key_low, *please_key_high);
6246
6247 return rc;
6248 }
6249