/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

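/*
 * Default mid callback: a response arrived (or the mid was cancelled),
 * so wake the task sleeping in wait_for_response().
 */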
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

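/*
 * Allocate a mid (multiplex id) entry for a request that is about to be
 * sent, recording the mid from the SMB header, the caller's pid and the
 * allocation time (jiffies) used by the CIFS_STATS2 slow-response timers.
 */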
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

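/*
 * Final kref release: return the mid to the mempool. Called with
 * GlobalMid_Lock held via cifs_mid_q_entry_release() below.
 */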
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
					       refcount);

	mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

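/*
 * Tear down a mid: free its response buffer, record slow-response
 * statistics when CIFS_STATS2 is enabled, and drop our reference.
 */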
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/*
	 * Commands taking longer than one second are indications that
	 * something is wrong, unless it is quite a slow link or server.
	 * Blocking lock commands (server->vals->lock_cmd) are expected
	 * to be slow, so they are excluded here.
	 */
	if (time_after(now, midEntry->when_alloc + HZ) &&
	    (midEntry->command != command)) {
		/* smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command */
		if (le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);

		trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
			       midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	cifs_mid_q_entry_release(midEntry);
}

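/*
 * Unlink a mid from the pending queue, marking it MID_DELETED so the
 * demultiplex thread will not touch it again, then free it.
 */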
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del_init(&mid->qhead);
	mid->mid_flags |= MID_DELETED;
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = NULL;
	smb_msg->msg_namelen = 0;
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking sends we have to retry
		 * more times, waiting increasing amounts of time to allow
		 * the socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/*
			 * Should never happen; letting the socket clear
			 * before retrying is our only obvious option here.
			 */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

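/*
 * Compute the on-the-wire length of a request: the kvec array (skipping
 * the 4-byte RFC1002 slot on SMB2+, which has no preamble) plus any
 * attached page array described by rq_offset, rq_pagesz and rq_tailsz.
 */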
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

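/*
 * Transmit one or more requests on an already-locked socket: hand off to
 * smbdirect on RDMA mounts, otherwise cork the TCP socket, send the
 * RFC1002 length marker (SMB2+), every kvec and every page, then uncork.
 * A partial send forces a reconnect so the server discards the truncated
 * SMB rather than misparsing the next one.
 */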
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}
	if (ssocket == NULL)
		return -ENOTSOCK;

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/* Generate an RFC1002 length marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov,
			      1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC,
			      iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB.
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

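/*
 * Send a request chain, encrypting it first when CIFS_TRANSFORM_REQ is
 * set: a transform header is prepended as an extra smb_rqst and the
 * server's init_transform_rq op builds the encrypted chain before it is
 * handed to __smb_send_rqst().
 */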
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(tr_hdr, 0, sizeof(*tr_hdr));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}

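/*
 * Legacy single-buffer send: wrap an SMB1-style buffer (4-byte RFC1002
 * header followed by the packet) in a two-kvec smb_rqst.
 */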
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

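/*
 * Take a send credit, blocking until one is available. Async ops such
 * as oplock breaks take one immediately since they must never be held
 * up; blocking lock ops are not charged a credit at all because they
 * may legitimately block on the server for a long time.
 */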
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Cannot count locking commands against the total
			 * as they are allowed to block on the server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

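/*
 * Look up the credit counter for this operation type and wait on it.
 * Echo probes never wait: if the echo credit is gone an echo is already
 * in flight, so there is no point sending another.
 */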
static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

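/*
 * SMB1 implementation of the wait_mtu_credits op: there is no MTU-based
 * credit accounting, so grant the full requested size and report zero
 * credits consumed.
 */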
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;
	return 0;
}

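/*
 * Check session state (new commands are rejected while the session is
 * still being set up or is going away), then allocate a mid and queue
 * it on the server's pending_mid_q.
 */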
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are shutting it down */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

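/*
 * Sleep (freezable, killable) until the demultiplex thread moves the
 * mid out of MID_REQUEST_SUBMITTED, i.e. a response arrived or the mid
 * was retried or shut down.
 */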
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

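/*
 * Build a mid for an async SMB1 request: validate the iov layout
 * (4-byte RFC1002 length in iov[0], packet starting at iov[1]), flag
 * the header for signing if the server requires it, then sign.
 */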
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}

/*
 * Send an SMB request; no response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and
 * whether to log the NT STATUS code (error) before mapping it to a
 * POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

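/*
 * Map the final mid state to an errno for a synchronous caller: 0 for a
 * received response, -EAGAIN when a retry is needed, -EIO for a
 * malformed response, -EHOSTDOWN on shutdown. The mid is freed here on
 * every path except a successfully received response, which the caller
 * still owns.
 */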
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

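/*
 * Validate a received SMB1 response: verify the signature when signing
 * is in use, then map the NT status in the header to a POSIX error.
 */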
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* verify the signature if the server signs */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

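/*
 * Synchronous counterpart of cifs_setup_async_request(): allocate and
 * queue a mid for this request, then sign it.
 */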
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

static void
cifs_noop_callback(struct mid_q_entry *mid)
{
}

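/*
 * Send a chain of one or more compounded requests and collect the
 * responses: take one credit per request, set up and send all mids
 * under srv_mutex (so signing order matches send order on the socket),
 * then wait for each response, returning credits and cancelling
 * outstanding mids on every error path.
 */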
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	unsigned int credits[MAX_COMPOUND] = {0};
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure we obtain 1 credit per request in the compound chain.
	 * This could be optimized by waiting for all the credits at once,
	 * but that wait can take long when credits are scarce (heavy
	 * operations in progress, or a server that grants few credits),
	 * so a fallback to the current approach would be needed anyway.
	 */
	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_free_request(ses->server, timeout, optype);
		if (rc) {
			/*
			 * We haven't sent an SMB packet to the server yet but
			 * we already obtained credits for i requests in the
			 * compound chain - need to return those credits back
			 * for future use. Note that we need to call add_credits
			 * multiple times to match the way we obtained credits
			 * in the first place and to account for in flight
			 * requests correctly.
			 */
			for (j = 0; j < i; j++)
				add_credits(ses->server, 1, optype);
			return rc;
		}
		credits[i] = 1;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(ses->server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&ses->server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(ses->server, credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		/*
		 * We don't invoke the callback for compounded requests until
		 * the last request in the chain.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_noop_callback;
	}
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
	cifs_in_send_dec(ses->server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(ses->server, num_rqst);
		ses->server->sequence_number -= 2;
	}

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		mutex_lock(&ses->server->srv_mutex);
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);
		mutex_unlock(&ses->server->srv_mutex);
	}

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(ses->server, midQ[i]);
		if (rc != 0) {
			cifs_dbg(FYI, "Cancelling wait for mid %llu\n",
				 midQ[i]->mid);
			send_cancel(ses->server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = DeleteMidQEntry;
				cancelled_mid[i] = true;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++)
		if (!cancelled_mid[i] && midQ[i]->resp_buf
		    && (midQ[i]->mid_state == MID_RESPONSE_RECEIVED))
			credits[i] = ses->server->ops->get_credits(midQ[i]);

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], ses->server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			ses->server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = ses->server->ops->check_receive(midQ[i], ses->server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RESP) == 0)
			midQ[i]->resp_buf = NULL;
	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		mutex_lock(&ses->server->srv_mutex);
		smb311_update_preauth_hash(ses, &iov, 1);
		mutex_unlock(&ses->server->srv_mutex);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
		add_credits(ses->server, credits[i], optype);
	}

	return rc;
}

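/* Convenience wrapper: send a single (non-compounded) request. */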
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}

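/*
 * Legacy iov-based entry point: prepend a kvec for the 4-byte RFC1001
 * length (carved off the front of the caller's first buffer) and hand
 * the result to cifs_send_recv().
 */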
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

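/*
 * Classic SMB1 synchronous send/receive on pre-built buffers: allocate
 * and sign a mid under srv_mutex, send, wait for the response, then
 * copy it into out_buf and map any error. The credit taken is returned
 * on all paths.
 */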
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */
	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/*
	 * We just modify the current in_buf to change
	 * the type of lock from LOCKING_ANDX_SHARED_LOCK
	 * or LOCKING_ANDX_EXCLUSIVE_LOCK to
	 * LOCKING_ANDX_CANCEL_LOCK.
	 */
	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

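/*
 * Like SendReceive() but for blocking lock requests: no credit is held
 * while waiting, and if a signal interrupts the wait the pending lock
 * is cancelled (NT_CANCEL for POSIX/Trans2 locks, LOCKINGX_CANCEL_LOCK
 * for Windows locks) before waiting again; -EACCES is then turned into
 * -ERESTARTSYS so the system call restarts.
 */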
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this
	 * socket and avoid races inside tcp sendmsg code that could cause
	 * corruption of smb data.
	 */
	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/*
			 * POSIX lock. We send a NT_CANCEL SMB to cause the
			 * blocking lock to return.
			 */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/*
			 * Windows lock. We send a LOCKINGX_CANCEL_LOCK
			 * to cause the blocking lock to return.
			 */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/*
			 * If we get -ENOLCK back the lock may have
			 * already been removed. Don't exit in this case.
			 */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}