Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
[deliverable/linux.git] / fs / cifs / transport.c
1 /*
2 * fs/cifs/transport.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * Jeremy Allison (jra@samba.org) 2006.
7 *
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23 #include <linux/fs.h>
24 #include <linux/list.h>
25 #include <linux/gfp.h>
26 #include <linux/wait.h>
27 #include <linux/net.h>
28 #include <linux/delay.h>
29 #include <linux/freezer.h>
30 #include <asm/uaccess.h>
31 #include <asm/processor.h>
32 #include <linux/mempool.h>
33 #include "cifspdu.h"
34 #include "cifsglob.h"
35 #include "cifsproto.h"
36 #include "cifs_debug.h"
37
38 extern mempool_t *cifs_mid_poolp;
39
40 static void
41 wake_up_task(struct mid_q_entry *mid)
42 {
43 wake_up_process(mid->callback_data);
44 }
45
46 struct mid_q_entry *
47 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
48 {
49 struct mid_q_entry *temp;
50
51 if (server == NULL) {
52 cERROR(1, "Null TCP session in AllocMidQEntry");
53 return NULL;
54 }
55
56 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
57 if (temp == NULL)
58 return temp;
59 else {
60 memset(temp, 0, sizeof(struct mid_q_entry));
61 temp->mid = smb_buffer->Mid; /* always LE */
62 temp->pid = current->pid;
63 temp->command = cpu_to_le16(smb_buffer->Command);
64 cFYI(1, "For smb_command %d", smb_buffer->Command);
65 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
66 /* when mid allocated can be before when sent */
67 temp->when_alloc = jiffies;
68
69 /*
70 * The default is for the mid to be synchronous, so the
71 * default callback just wakes up the current task.
72 */
73 temp->callback = wake_up_task;
74 temp->callback_data = current;
75 }
76
77 atomic_inc(&midCount);
78 temp->mid_state = MID_REQUEST_ALLOCATED;
79 return temp;
80 }
81
/*
 * Release a mid entry: free its response buffer back to the appropriate
 * buffer pool and return the entry itself to the mid mempool. The caller
 * is responsible for having unlinked the entry from any list first
 * (see delete_mid()).
 */
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	/* large_buf records which pool resp_buf came from */
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
		/* locking commands are expected to block, so skip them */
		if ((cifsFYI & CIFS_TIMER) &&
		    (midEntry->command != cpu_to_le16(SMB_COM_LOCKING_ANDX))) {
			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}
112
/*
 * Unlink @mid from the pending_mid_q under GlobalMid_Lock, then free it.
 * Use this (rather than DeleteMidQEntry() directly) whenever the mid may
 * still be on a queue.
 */
static void
delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
122
123 static int
124 smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
125 {
126 int rc = 0;
127 int i = 0;
128 struct msghdr smb_msg;
129 __be32 *buf_len = (__be32 *)(iov[0].iov_base);
130 unsigned int len = iov[0].iov_len;
131 unsigned int total_len;
132 int first_vec = 0;
133 unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
134 struct socket *ssocket = server->ssocket;
135
136 if (ssocket == NULL)
137 return -ENOTSOCK; /* BB eventually add reconnect code here */
138
139 smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
140 smb_msg.msg_namelen = sizeof(struct sockaddr);
141 smb_msg.msg_control = NULL;
142 smb_msg.msg_controllen = 0;
143 if (server->noblocksnd)
144 smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
145 else
146 smb_msg.msg_flags = MSG_NOSIGNAL;
147
148 total_len = 0;
149 for (i = 0; i < n_vec; i++)
150 total_len += iov[i].iov_len;
151
152 cFYI(1, "Sending smb: total_len %d", total_len);
153 dump_smb(iov[0].iov_base, len);
154
155 i = 0;
156 while (total_len) {
157 rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
158 n_vec - first_vec, total_len);
159 if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
160 i++;
161 /*
162 * If blocking send we try 3 times, since each can block
163 * for 5 seconds. For nonblocking we have to try more
164 * but wait increasing amounts of time allowing time for
165 * socket to clear. The overall time we wait in either
166 * case to send on the socket is about 15 seconds.
167 * Similarly we wait for 15 seconds for a response from
168 * the server in SendReceive[2] for the server to send
169 * a response back for most types of requests (except
170 * SMB Write past end of file which can be slow, and
171 * blocking lock operations). NFS waits slightly longer
172 * than CIFS, but this can make it take longer for
173 * nonresponsive servers to be detected and 15 seconds
174 * is more than enough time for modern networks to
175 * send a packet. In most cases if we fail to send
176 * after the retries we will kill the socket and
177 * reconnect which may clear the network problem.
178 */
179 if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
180 cERROR(1, "sends on sock %p stuck for 15 seconds",
181 ssocket);
182 rc = -EAGAIN;
183 break;
184 }
185 msleep(1 << i);
186 continue;
187 }
188 if (rc < 0)
189 break;
190
191 if (rc == total_len) {
192 total_len = 0;
193 break;
194 } else if (rc > total_len) {
195 cERROR(1, "sent %d requested %d", rc, total_len);
196 break;
197 }
198 if (rc == 0) {
199 /* should never happen, letting socket clear before
200 retrying is our only obvious option here */
201 cERROR(1, "tcp sent no data");
202 msleep(500);
203 continue;
204 }
205 total_len -= rc;
206 /* the line below resets i */
207 for (i = first_vec; i < n_vec; i++) {
208 if (iov[i].iov_len) {
209 if (rc > iov[i].iov_len) {
210 rc -= iov[i].iov_len;
211 iov[i].iov_len = 0;
212 } else {
213 iov[i].iov_base += rc;
214 iov[i].iov_len -= rc;
215 first_vec = i;
216 break;
217 }
218 }
219 }
220 i = 0; /* in case we get ENOSPC on the next send */
221 }
222
223 if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
224 cFYI(1, "partial send (%d remaining), terminating session",
225 total_len);
226 /* If we have only sent part of an SMB then the next SMB
227 could be taken as the remainder of this one. We need
228 to kill the socket so the server throws away the partial
229 SMB */
230 server->tcpStatus = CifsNeedReconnect;
231 }
232
233 if (rc < 0 && rc != -EINTR)
234 cERROR(1, "Error %d sending data on socket to server", rc);
235 else
236 rc = 0;
237
238 /* Don't want to modify the buffer as a side effect of this call. */
239 *buf_len = cpu_to_be32(smb_buf_length);
240
241 return rc;
242 }
243
244 int
245 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
246 unsigned int smb_buf_length)
247 {
248 struct kvec iov;
249
250 iov.iov_base = smb_buffer;
251 iov.iov_len = smb_buf_length + 4;
252
253 return smb_sendv(server, &iov, 1);
254 }
255
/*
 * Reserve a send slot on @server before putting a request on the wire.
 * @credits points at the server's credit counter (obtained via
 * ops->get_credits_field, see wait_for_free_request()).
 *
 * CIFS_ASYNC_OP requests (e.g. oplock break responses) must never be
 * throttled and take a slot unconditionally. CIFS_BLOCKING_OP requests
 * consume neither a credit nor an in_flight slot, since they may block
 * on the server indefinitely. Everything else sleeps killably on
 * request_q until a credit is available.
 *
 * Returns 0 on success, -ENOENT if the tcp session is exiting, or the
 * wait_event_killable() error if interrupted.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int optype,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (optype == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			/* no credits: drop the lock and sleep until some
			   become available, then re-check under the lock */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (optype != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
303
304 static int
305 wait_for_free_request(struct TCP_Server_Info *server, const int optype)
306 {
307 return wait_for_free_credits(server, optype,
308 server->ops->get_credits_field(server));
309 }
310
311 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
312 struct mid_q_entry **ppmidQ)
313 {
314 if (ses->server->tcpStatus == CifsExiting) {
315 return -ENOENT;
316 }
317
318 if (ses->server->tcpStatus == CifsNeedReconnect) {
319 cFYI(1, "tcp session dead - return to caller to retry");
320 return -EAGAIN;
321 }
322
323 if (ses->status != CifsGood) {
324 /* check if SMB session is bad because we are setting it up */
325 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
326 (in_buf->Command != SMB_COM_NEGOTIATE))
327 return -EAGAIN;
328 /* else ok - we are setting up session */
329 }
330 *ppmidQ = AllocMidQEntry(in_buf, ses->server);
331 if (*ppmidQ == NULL)
332 return -ENOMEM;
333 spin_lock(&GlobalMid_Lock);
334 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
335 spin_unlock(&GlobalMid_Lock);
336 return 0;
337 }
338
339 static int
340 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
341 {
342 int error;
343
344 error = wait_event_freezekillable(server->response_q,
345 midQ->mid_state != MID_REQUEST_SUBMITTED);
346 if (error < 0)
347 return -ERESTARTSYS;
348
349 return 0;
350 }
351
352 static int
353 cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
354 unsigned int nvec, struct mid_q_entry **ret_mid)
355 {
356 int rc;
357 struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
358 struct mid_q_entry *mid;
359
360 /* enable signing if server requires it */
361 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
362 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
363
364 mid = AllocMidQEntry(hdr, server);
365 if (mid == NULL)
366 return -ENOMEM;
367
368 /* put it on the pending_mid_q */
369 spin_lock(&GlobalMid_Lock);
370 list_add_tail(&mid->qhead, &server->pending_mid_q);
371 spin_unlock(&GlobalMid_Lock);
372
373 rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
374 if (rc)
375 delete_mid(mid);
376 *ret_mid = mid;
377 return rc;
378 }
379
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 *
 * A credit is consumed here and must be returned (add_credits) on every
 * failure path; the matching add_credits for the success path happens
 * when the response is processed (outside this function).
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
		unsigned int nvec, mid_receive_t *receive,
		mid_callback_t *callback, void *cbdata, bool ignore_pend)
{
	int rc;
	struct mid_q_entry *mid;

	/* ignore_pend requests bypass the credit throttle (CIFS_ASYNC_OP) */
	rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
	if (rc)
		return rc;

	/* srv_mutex keeps signing order identical to send order on the
	   socket (see comment in SendReceive2) */
	mutex_lock(&server->srv_mutex);
	rc = cifs_setup_async_request(server, iov, nvec, &mid);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		/* return the credit taken above and wake any waiters */
		add_credits(server, 1);
		wake_up(&server->request_q);
		return rc;
	}

	/* install the caller's completion hooks before the frame can
	   possibly be answered */
	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_sendv(server, iov, nvec);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);
	mutex_unlock(&server->srv_mutex);

	if (rc)
		goto out_err;

	return rc;
out_err:
	delete_mid(mid);
	add_credits(server, 1);
	wake_up(&server->request_q);
	return rc;
}
426
427 /*
428 *
429 * Send an SMB Request. No response info (other than return code)
430 * needs to be parsed.
431 *
432 * flags indicate the type of request buffer and how long to wait
433 * and whether to log NT STATUS code (error) before mapping it to POSIX error
434 *
435 */
436 int
437 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
438 char *in_buf, int flags)
439 {
440 int rc;
441 struct kvec iov[1];
442 int resp_buf_type;
443
444 iov[0].iov_base = in_buf;
445 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
446 flags |= CIFS_NO_RESP;
447 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
448 cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);
449
450 return rc;
451 }
452
/*
 * Map the final state of a synchronously-awaited mid to an errno.
 *
 * On MID_RESPONSE_RECEIVED the mid is left alive so the caller can
 * consume resp_buf (the caller must free it, e.g. via delete_mid()).
 * In every other state the mid is freed here. For the retry/malformed/
 * shutdown states no list_del is done -- NOTE(review): this assumes the
 * entry was already unlinked when its state changed; the default
 * (invalid-state) branch unlinks defensively before freeing.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cFYI(1, "%s: cmd=%d mid=%llu state=%d", __func__,
	     le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cERROR(1, "%s: invalid mid state mid=%llu state=%d", __func__,
		       mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}
486
487 static inline int
488 send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
489 {
490 return server->ops->send_cancel ?
491 server->ops->send_cancel(server, buf, mid) : 0;
492 }
493
494 int
495 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
496 bool log_error)
497 {
498 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
499
500 dump_smb(mid->resp_buf, min_t(u32, 92, len));
501
502 /* convert the length into a more usable form */
503 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
504 struct kvec iov;
505
506 iov.iov_base = mid->resp_buf;
507 iov.iov_len = len;
508 /* FIXME: add code to kill session */
509 if (cifs_verify_signature(&iov, 1, server,
510 mid->sequence_number + 1) != 0)
511 cERROR(1, "Unexpected SMB signature");
512 }
513
514 /* BB special case reconnect tid and uid here? */
515 return map_smb_to_linux_error(mid->resp_buf, log_error);
516 }
517
518 int
519 cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
520 unsigned int nvec, struct mid_q_entry **ret_mid)
521 {
522 int rc;
523 struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
524 struct mid_q_entry *mid;
525
526 rc = allocate_mid(ses, hdr, &mid);
527 if (rc)
528 return rc;
529 rc = cifs_sign_smb2(iov, nvec, ses->server, &mid->sequence_number);
530 if (rc)
531 delete_mid(mid);
532 *ret_mid = mid;
533 return rc;
534 }
535
/*
 * Send the SMB in @iov synchronously and wait for the response.
 *
 * iov[0].iov_base is a small-pool request buffer; this routine owns it
 * and releases it on every path. On success iov[0] is rewritten to
 * point at the response buffer and *pRespBufType says which pool
 * (CIFS_LARGE_BUFFER / CIFS_SMALL_BUFFER) it belongs to. If
 * CIFS_NO_RESP was passed, the response buffer is instead freed here
 * together with the mid.
 */
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
	     const int flags)
{
	int rc = 0;
	int long_op;
	struct mid_q_entry *midQ;
	char *buf = iov[0].iov_base;

	long_op = flags & CIFS_TIMEOUT_MASK;

	*pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(buf);
		cERROR(1, "Null session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(buf);
		return -ENOENT;
	}

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */

	rc = wait_for_free_request(ses->server, long_op);
	if (rc) {
		cifs_small_buf_release(buf);
		return rc;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	rc = ses->server->ops->setup_request(ses, iov, n_vec, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(buf);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_sendv(ses->server, iov, n_vec);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(buf);
		goto out;
	}

	/* async callers do not wait for the response */
	if (long_op == CIFS_ASYNC_OP) {
		cifs_small_buf_release(buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		/* interrupted: attempt to cancel; if the request is still
		   in flight, set the mid's callback to DeleteMidQEntry so
		   whoever completes it later frees it, and return without
		   touching the mid further */
		send_cancel(ses->server, buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(buf);
			add_credits(ses->server, 1);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		/* mid was already freed by cifs_sync_mid_result */
		add_credits(ses->server, 1);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cFYI(1, "Bad MID state?");
		goto out;
	}

	/* hand the response buffer back to the caller through iov[0] */
	buf = (char *)midQ->resp_buf;
	iov[0].iov_base = buf;
	iov[0].iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*pRespBufType = CIFS_LARGE_BUFFER;
	else
		*pRespBufType = CIFS_SMALL_BUFFER;

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	delete_mid(midQ);
	add_credits(ses->server, 1);

	return rc;
}
656
/*
 * Synchronous send/receive with copy-out: the response frame is copied
 * into @out_buf and *pbytes_returned is set to its RFC1002 length.
 * NOTE(review): out_buf is assumed large enough for a maximum-size
 * frame (the request is bounded below; the response size is not
 * re-checked before the memcpy) -- confirm against callers.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int long_op)
{
	int rc = 0;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
	    MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
		       be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, long_op);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	/* async callers do not wait for the response */
	if (long_op == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		/* interrupted: attempt to cancel; if still in flight, let
		   whoever completes the mid free it via the callback */
		send_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		/* mid was already freed by cifs_sync_mid_result */
		add_credits(ses->server, 1);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	/* copy the whole frame (RFC1002 header + body) to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	delete_mid(midQ);
	add_credits(ses->server, 1);

	return rc;
}
762
763 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
764 blocking lock to return. */
765
766 static int
767 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
768 struct smb_hdr *in_buf,
769 struct smb_hdr *out_buf)
770 {
771 int bytes_returned;
772 struct cifs_ses *ses = tcon->ses;
773 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
774
775 /* We just modify the current in_buf to change
776 the type of lock from LOCKING_ANDX_SHARED_LOCK
777 or LOCKING_ANDX_EXCLUSIVE_LOCK to
778 LOCKING_ANDX_CANCEL_LOCK. */
779
780 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
781 pSMB->Timeout = 0;
782 pSMB->hdr.Mid = get_next_mid(ses->server);
783
784 return SendReceive(xid, ses, in_buf, out_buf,
785 &bytes_returned, 0);
786 }
787
/*
 * Send a blocking-lock SMB and wait (interruptibly) for the response.
 * If a signal arrives while the lock is still pending, a cancel is sent
 * (NT_CANCEL for POSIX/transaction2 locks, LOCKINGX_CANCEL_LOCK for
 * Windows locks) and we then wait for the final response. On a
 * successful cancel-and-respond cycle an -EACCES result is converted
 * to -ERESTARTSYS so the system call restarts.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;		/* set once we decide to restart the syscall */
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;

	if (tcon == NULL || tcon->ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
	    MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
		       be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	/* CIFS_BLOCKING_OP: do not consume a credit (may block on server) */
	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, in_buf, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		/* the cancel itself was sent; now wait for the final
		   response to the original lock request */
		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			/* still in flight: let whoever completes the mid
			   free it via the DeleteMidQEntry callback */
			send_cancel(ses->server, in_buf, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		/* mid was already freed by cifs_sync_mid_result */
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	/* copy the whole frame (RFC1002 header + body) to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
This page took 0.068522 seconds and 6 git commands to generate.