/*******************************************************************************
 * Filename: target_core_xcopy.c
 *
 * This file contains support for SPC-4 Extended-Copy offload with generic
 * TCM backends.
 *
 * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
 *
 * Author:
 * Nicholas A. Bellinger <nab@daterainc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"

static struct workqueue_struct *xcopy_wq = NULL;
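
/*
 * Generate the 16-byte NAA IEEE Registered Extended designator for @dev,
 * mirroring the designator reported by the INQUIRY EVPD 0x83 emulation,
 * so that 0xE4 target descriptors can be matched against local devices.
 */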
static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
{
	int off = 0;

	buf[off++] = (0x6 << 4);
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
	return 0;
}
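
/*
 * Locate the remote se_device for the 0xE4 descriptor that does not match
 * the local device: when the XCOPY was received on the source device we
 * search for the destination WWN, and vice versa.  Walks the global device
 * list comparing each device's generated NAA designator, and takes a
 * configfs dependency on the match so it cannot be released mid-copy.
 */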
static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
				bool src)
{
	struct se_device *se_dev;
	struct configfs_subsystem *subsys = target_core_subsystem[0];
	unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
	int rc;

	if (src)
		dev_wwn = &xop->dst_tid_wwn[0];
	else
		dev_wwn = &xop->src_tid_wwn[0];

	mutex_lock(&g_device_mutex);
	list_for_each_entry(se_dev, &g_device_list, g_dev_node) {

		if (!se_dev->dev_attrib.emulate_3pc)
			continue;

		memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
		target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);

		rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
		if (rc != 0)
			continue;

		if (src) {
			xop->dst_dev = se_dev;
			pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
				" se_dev\n", xop->dst_dev);
		} else {
			xop->src_dev = se_dev;
			pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
				" se_dev\n", xop->src_dev);
		}

		rc = configfs_depend_item(subsys,
				&se_dev->dev_group.cg_item);
		if (rc != 0) {
			pr_err("configfs_depend_item attempt failed:"
				" %d for se_dev: %p\n", rc, se_dev);
			mutex_unlock(&g_device_mutex);
			return rc;
		}

		pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p"
			" se_dev->se_dev_group: %p\n", subsys, se_dev,
			&se_dev->dev_group);

		mutex_unlock(&g_device_mutex);
		return 0;
	}
	mutex_unlock(&g_device_mutex);

	pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
	return -EINVAL;
}
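
/*
 * Parse a single 0xE4 (identification descriptor) target descriptor,
 * validating the code set, association, designator type and length, and
 * record the 16-byte NAA WWN as the source or destination target ID.
 * If the WWN matches the local device, note whether this EXTENDED_COPY
 * was received on the source or the destination side.
 */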
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
				unsigned char *p, bool src)
{
	unsigned char *desc = p;
	unsigned short ript;
	u8 desig_len;
	/*
	 * Extract RELATIVE INITIATOR PORT IDENTIFIER
	 */
	ript = get_unaligned_be16(&desc[2]);
	pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
	/*
	 * Check for supported code set, association, and designator type
	 */
	if ((desc[4] & 0x0f) != 0x1) {
		pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
		return -EINVAL;
	}
	if ((desc[5] & 0x30) != 0x00) {
		pr_err("XCOPY 0xe4: association other than LUN not supported\n");
		return -EINVAL;
	}
	if ((desc[5] & 0x0f) != 0x3) {
		pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
				(desc[5] & 0x0f));
		return -EINVAL;
	}
	/*
	 * Check for matching 16 byte length for NAA IEEE Registered Extended
	 * Assigned designator
	 */
	desig_len = desc[7];
	if (desig_len != 16) {
		pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
		return -EINVAL;
	}
	pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
	/*
	 * Check for NAA IEEE Registered Extended Assigned header..
	 */
	if ((desc[8] & 0xf0) != 0x60) {
		pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
				(desc[8] & 0xf0));
		return -EINVAL;
	}

	if (src) {
		memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
		/*
		 * Determine if the source designator matches the local device
		 */
		if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
				XCOPY_NAA_IEEE_REGEX_LEN)) {
			xop->op_origin = XCOL_SOURCE_RECV_OP;
			xop->src_dev = se_cmd->se_dev;
			pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
					" received xop\n", xop->src_dev);
		}
	} else {
		memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
		/*
		 * Determine if the destination designator matches the local device
		 */
		if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
				XCOPY_NAA_IEEE_REGEX_LEN)) {
			xop->op_origin = XCOL_DEST_RECV_OP;
			xop->dst_dev = se_cmd->se_dev;
			pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
					" received xop\n", xop->dst_dev);
		}
	}

	return 0;
}
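
/*
 * Walk the target descriptor list.  Only 0xE4 identification descriptors
 * are supported, assumed to be in source -> destination order, and the
 * remote end of the copy is then resolved via
 * target_xcopy_locate_se_dev_e4().  Returns the number of descriptors
 * processed, or -EINVAL on failure.
 */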
static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
				struct xcopy_op *xop, unsigned char *p,
				unsigned short tdll)
{
	struct se_device *local_dev = se_cmd->se_dev;
	unsigned char *desc = p;
	int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
	unsigned short start = 0;
	bool src = true;

	if (offset != 0) {
		pr_err("XCOPY target descriptor list length is not a"
			" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
		return -EINVAL;
	}
	if (tdll > 64) {
		pr_err("XCOPY target descriptor supports a maximum of"
			" two src/dest descriptors, tdll: %hu too large..\n", tdll);
		return -EINVAL;
	}
	/*
	 * Generate an IEEE Registered Extended designator based upon the
	 * se_device the XCOPY was received upon..
	 */
	memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
	target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);

	while (start < tdll) {
		/*
		 * Check for a target descriptor of type 0xE4, which is
		 * matched using the VPD 0x83 NAA WWN..
		 */
		switch (desc[0]) {
		case 0xe4:
			rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
							&desc[0], src);
			if (rc != 0)
				goto out;
			/*
			 * Assume target descriptors are in source -> destination order..
			 */
			src = !src;
			start += XCOPY_TARGET_DESC_LEN;
			desc += XCOPY_TARGET_DESC_LEN;
			ret++;
			break;
		default:
			pr_err("XCOPY unsupported descriptor type code:"
					" 0x%02x\n", desc[0]);
			goto out;
		}
	}

	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
	else
		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);

	if (rc < 0)
		goto out;

	pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
		 xop->src_dev, &xop->src_tid_wwn[0]);
	pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
		 xop->dst_dev, &xop->dst_tid_wwn[0]);

	return ret;

out:
	return -EINVAL;
}
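
/*
 * Parse a type 0x02 (block -> block) segment descriptor.  The descriptor
 * length field must be 0x18, followed by the source/destination target
 * descriptor indexes (STDI/DTDI), the number of blocks to copy (NOLB),
 * and the source and destination LBAs.  When the DC bit is set, the
 * device block length is also extracted from bytes 29-31.
 */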
static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
					unsigned char *p)
{
	unsigned char *desc = p;
	int dc = (desc[1] & 0x02);
	unsigned short desc_len;

	desc_len = get_unaligned_be16(&desc[2]);
	if (desc_len != 0x18) {
		pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
				" %hu\n", desc_len);
		return -EINVAL;
	}

	xop->stdi = get_unaligned_be16(&desc[4]);
	xop->dtdi = get_unaligned_be16(&desc[6]);
	pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
		desc_len, xop->stdi, xop->dtdi, dc);

	xop->nolb = get_unaligned_be16(&desc[10]);
	xop->src_lba = get_unaligned_be64(&desc[12]);
	xop->dst_lba = get_unaligned_be64(&desc[20]);
	pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
		xop->nolb, (unsigned long long)xop->src_lba,
		(unsigned long long)xop->dst_lba);

	if (dc != 0) {
		xop->dbl = (desc[29] & 0xff) << 16;
		xop->dbl |= (desc[30] & 0xff) << 8;
		xop->dbl |= desc[31] & 0xff;

		pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
	}
	return 0;
}
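
/*
 * Walk the segment descriptor list.  Only type 0x02 (block -> block)
 * segment descriptors are supported.  Returns the number of descriptors
 * processed, or -EINVAL on an unsupported type or malformed descriptor.
 */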
static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
				struct xcopy_op *xop, unsigned char *p,
				unsigned int sdll)
{
	unsigned char *desc = p;
	unsigned int start = 0;
	int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;

	if (offset != 0) {
		pr_err("XCOPY segment descriptor list length is not a"
			" multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
		return -EINVAL;
	}

	while (start < sdll) {
		/*
		 * Check segment descriptor type code for block -> block
		 */
		switch (desc[0]) {
		case 0x02:
			rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
			if (rc < 0)
				goto out;

			ret++;
			start += XCOPY_SEGMENT_DESC_LEN;
			desc += XCOPY_SEGMENT_DESC_LEN;
			break;
		default:
			pr_err("XCOPY unsupported segment descriptor"
				" type: 0x%02x\n", desc[0]);
			goto out;
		}
	}

	return ret;

out:
	return -EINVAL;
}

/*
 * Start xcopy_pt ops
 */

struct xcopy_pt_cmd {
	bool remote_port;
	struct se_cmd se_cmd;
	struct xcopy_op *xcopy_op;
	struct completion xpt_passthrough_sem;
	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
};
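
/*
 * Statically allocated fabric objects representing the internal
 * "xcopy-pt" initiator endpoint.  The passthrough READ/WRITE commands
 * below are submitted through this emulated local fabric rather than
 * through any real fabric driver.
 */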
static struct se_port xcopy_pt_port;
static struct se_portal_group xcopy_pt_tpg;
static struct se_session xcopy_pt_sess;
static struct se_node_acl xcopy_pt_nacl;

static char *xcopy_pt_get_fabric_name(void)
{
	return "xcopy-pt";
}

static u32 xcopy_pt_get_tag(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{
	struct configfs_subsystem *subsys = target_core_subsystem[0];
	struct se_device *remote_dev;

	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
		remote_dev = xop->dst_dev;
	else
		remote_dev = xop->src_dev;

	pr_debug("Calling configfs_undepend_item for subsys: %p"
		  " remote_dev: %p remote_dev->dev_group: %p\n",
		  subsys, remote_dev, &remote_dev->dev_group.cg_item);

	configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item);
}

static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
{
	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
				struct xcopy_pt_cmd, se_cmd);

	kfree(xpt_cmd);
}

static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
{
	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
				struct xcopy_pt_cmd, se_cmd);

	complete(&xpt_cmd->xpt_passthrough_sem);
	return 0;
}

static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
{
	return 0;
}

static struct target_core_fabric_ops xcopy_pt_tfo = {
	.get_fabric_name	= xcopy_pt_get_fabric_name,
	.get_task_tag		= xcopy_pt_get_tag,
	.get_cmd_state		= xcopy_pt_get_cmd_state,
	.release_cmd		= xcopy_pt_release_cmd,
	.check_stop_free	= xcopy_pt_check_stop_free,
	.write_pending		= xcopy_pt_write_pending,
	.write_pending_status	= xcopy_pt_write_pending_status,
	.queue_data_in		= xcopy_pt_queue_data_in,
	.queue_status		= xcopy_pt_queue_status,
};

/*
 * End xcopy_pt_ops
 */

int target_xcopy_setup_pt(void)
{
	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
	if (!xcopy_wq) {
		pr_err("Unable to allocate xcopy_wq\n");
		return -ENOMEM;
	}

	memset(&xcopy_pt_port, 0, sizeof(struct se_port));
	INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list);
	INIT_LIST_HEAD(&xcopy_pt_port.sep_list);
	mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex);

	memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
	INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
	INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
	INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);

	xcopy_pt_port.sep_tpg = &xcopy_pt_tpg;
	xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;

	memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
	INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
	INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);

	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;

	xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
	xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;

	return 0;
}

void target_xcopy_release_pt(void)
{
	if (xcopy_wq)
		destroy_workqueue(xcopy_wq);
}
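
/*
 * Associate the passthrough command with either the emulated xcopy port
 * or the local port the EXTENDED_COPY arrived on, so that SCSI
 * reservations are honored for X-COPY PUSH (CDB received on the source
 * device) and X-COPY PULL (CDB received on the destination device).
 */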
static void target_xcopy_setup_pt_port(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	bool remote_port)
{
	struct se_cmd *ec_cmd = xop->xop_se_cmd;
	struct se_cmd *pt_cmd = &xpt_cmd->se_cmd;

	if (xop->op_origin == XCOL_SOURCE_RECV_OP) {
		/*
		 * Honor destination port reservations for X-COPY PUSH emulation
		 * when the CDB is received on the local source port: blocks are
		 * READ here and then WRITTEN on the remote destination port.
		 */
		if (remote_port) {
			xpt_cmd->remote_port = remote_port;
			pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
			pr_debug("Setup emulated remote DEST xcopy_pt_port: %p to"
				" cmd->se_lun->lun_sep for X-COPY data PUSH\n",
				pt_cmd->se_lun->lun_sep);
		} else {
			pt_cmd->se_lun = ec_cmd->se_lun;
			pt_cmd->se_dev = ec_cmd->se_dev;

			pr_debug("Honoring local SRC port from ec_cmd->se_dev:"
				" %p\n", pt_cmd->se_dev);
			pr_debug("Honoring local SRC port from ec_cmd->se_lun: %p\n",
				pt_cmd->se_lun);
		}
	} else {
		/*
		 * Honor source port reservations for X-COPY PULL emulation
		 * when the CDB is received on the local destination port:
		 * blocks are READ from the remote source port and then
		 * WRITTEN on the local destination port.
		 */
		if (remote_port) {
			xpt_cmd->remote_port = remote_port;
			pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
			pr_debug("Setup emulated remote SRC xcopy_pt_port: %p to"
				" cmd->se_lun->lun_sep for X-COPY data PULL\n",
				pt_cmd->se_lun->lun_sep);
		} else {
			pt_cmd->se_lun = ec_cmd->se_lun;
			pt_cmd->se_dev = ec_cmd->se_dev;

			pr_debug("Honoring local DST port from ec_cmd->se_dev:"
				" %p\n", pt_cmd->se_dev);
			pr_debug("Honoring local DST port from ec_cmd->se_lun: %p\n",
				pt_cmd->se_lun);
		}
	}
}

static int target_xcopy_init_pt_lun(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	struct se_device *se_dev,
	struct se_cmd *pt_cmd,
	bool remote_port)
{
	/*
	 * Don't allocate + init a pt_cmd->se_lun if honoring the local port
	 * for reservations.  The pt_cmd->se_lun pointer will be set up from
	 * within target_xcopy_setup_pt_port()
	 */
	if (!remote_port) {
		pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
		return 0;
	}

	pt_cmd->se_lun = &se_dev->xcopy_lun;
	pt_cmd->se_dev = se_dev;

	pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev);
	pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;

	pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n",
		pt_cmd->se_lun->lun_se_dev);

	return 0;
}
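
/*
 * Prepare the internal passthrough se_cmd: set up the LUN + port for
 * reservation checking, build the command from the given CDB, and either
 * allocate a fresh SGL (for the READ) or map the SGL saved in
 * xop->xop_data_sg zero-copy (for the WRITE).
 */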
static int target_xcopy_setup_pt_cmd(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	struct se_device *se_dev,
	unsigned char *cdb,
	bool remote_port,
	bool alloc_mem)
{
	struct se_cmd *cmd = &xpt_cmd->se_cmd;
	sense_reason_t sense_rc;
	int ret = 0, rc;
	/*
	 * Setup the LUN + port to honor reservations for X-COPY PUSH or
	 * X-COPY PULL, based upon xop->op_origin, i.e. where the CDB was
	 * received.
	 */
	rc = target_xcopy_init_pt_lun(xpt_cmd, xop, se_dev, cmd, remote_port);
	if (rc < 0) {
		ret = rc;
		goto out;
	}
	xpt_cmd->xcopy_op = xop;
	target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);

	sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
	if (sense_rc) {
		ret = -EINVAL;
		goto out;
	}

	if (alloc_mem) {
		rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				      cmd->data_length, false);
		if (rc < 0) {
			ret = rc;
			goto out;
		}
		/*
		 * Set this bit so that transport_free_pages() allows the
		 * caller to release SGLs + physical memory allocated by
		 * transport_generic_get_mem()..
		 */
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	} else {
		/*
		 * Here the previously allocated SGLs for the internal READ
		 * are mapped zero-copy to the internal WRITE.
		 */
		sense_rc = transport_generic_map_mem_to_cmd(cmd,
					xop->xop_data_sg, xop->xop_data_nents,
					NULL, 0);
		if (sense_rc) {
			ret = -EINVAL;
			goto out;
		}

		pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
			 " %u\n", cmd->t_data_sg, cmd->t_data_nents);
	}

	return 0;

out:
	return ret;
}
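
/*
 * Dispatch the passthrough command to the backend and wait for it to
 * complete; xcopy_pt_check_stop_free() signals xpt_passthrough_sem once
 * the command finishes.  Returns -EINVAL on any non-GOOD SCSI status.
 */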
static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
{
	struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
	sense_reason_t sense_rc;

	sense_rc = transport_generic_new_cmd(se_cmd);
	if (sense_rc)
		return -EINVAL;

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		target_execute_cmd(se_cmd);

	wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);

	pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
			se_cmd->scsi_status);

	return (se_cmd->scsi_status) ? -EINVAL : 0;
}
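
/*
 * Build and issue an internal READ_16 to the source device.  The
 * allocated data SGL is saved in the xcopy_op so the subsequent WRITE
 * can reuse it zero-copy.
 */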
static int target_xcopy_read_source(
	struct se_cmd *ec_cmd,
	struct xcopy_op *xop,
	struct se_device *src_dev,
	sector_t src_lba,
	u32 src_sectors)
{
	struct xcopy_pt_cmd *xpt_cmd;
	struct se_cmd *se_cmd;
	u32 length = (src_sectors * src_dev->dev_attrib.block_size);
	int rc;
	unsigned char cdb[16];
	bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);

	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
	if (!xpt_cmd) {
		pr_err("Unable to allocate xcopy_pt_cmd\n");
		return -ENOMEM;
	}
	init_completion(&xpt_cmd->xpt_passthrough_sem);
	se_cmd = &xpt_cmd->se_cmd;

	memset(&cdb[0], 0, 16);
	cdb[0] = READ_16;
	put_unaligned_be64(src_lba, &cdb[2]);
	put_unaligned_be32(src_sectors, &cdb[10]);
	pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
		(unsigned long long)src_lba, src_sectors, length);

	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
			      DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
	xop->src_pt_cmd = xpt_cmd;

	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
				remote_port, true);
	if (rc < 0) {
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	xop->xop_data_sg = se_cmd->t_data_sg;
	xop->xop_data_nents = se_cmd->t_data_nents;
	pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
		" memory\n", xop->xop_data_sg, xop->xop_data_nents);

	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
	if (rc < 0) {
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}
	/*
	 * Clear off the allocated t_data_sg, that has been saved for
	 * zero-copy WRITE submission reuse in struct xcopy_op..
	 */
	se_cmd->t_data_sg = NULL;
	se_cmd->t_data_nents = 0;

	return 0;
}
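
/*
 * Build and issue an internal WRITE_16 to the destination device,
 * reusing the SGL produced by the preceding READ.  On setup failure the
 * SGL ownership is handed back to the READ command so the memory is
 * released exactly once.
 */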
static int target_xcopy_write_destination(
	struct se_cmd *ec_cmd,
	struct xcopy_op *xop,
	struct se_device *dst_dev,
	sector_t dst_lba,
	u32 dst_sectors)
{
	struct xcopy_pt_cmd *xpt_cmd;
	struct se_cmd *se_cmd;
	u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
	int rc;
	unsigned char cdb[16];
	bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);

	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
	if (!xpt_cmd) {
		pr_err("Unable to allocate xcopy_pt_cmd\n");
		return -ENOMEM;
	}
	init_completion(&xpt_cmd->xpt_passthrough_sem);
	se_cmd = &xpt_cmd->se_cmd;

	memset(&cdb[0], 0, 16);
	cdb[0] = WRITE_16;
	put_unaligned_be64(dst_lba, &cdb[2]);
	put_unaligned_be32(dst_sectors, &cdb[10]);
	pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
		(unsigned long long)dst_lba, dst_sectors, length);

	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
			      DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
	xop->dst_pt_cmd = xpt_cmd;

	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
				remote_port, false);
	if (rc < 0) {
		struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
		/*
		 * If the failure happened before the t_mem_list hand-off in
		 * target_xcopy_setup_pt_cmd(), reset the memory + clear the
		 * flag so that core releases this memory on error during the
		 * X-COPY WRITE I/O.
		 */
		src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
		src_cmd->t_data_sg = xop->xop_data_sg;
		src_cmd->t_data_nents = xop->xop_data_nents;

		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
	if (rc < 0) {
		se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	return 0;
}
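
/*
 * Process one EXTENDED_COPY operation from the xcopy workqueue: loop
 * over the block range in chunks bounded by the smaller hw_max_sectors
 * of the two devices (and XCOPY_MAX_SECTORS), issuing a READ from the
 * source followed by a WRITE to the destination for each chunk, then
 * complete the originating command with GOOD or CHECK_CONDITION status.
 */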
static void target_xcopy_do_work(struct work_struct *work)
{
	struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
	struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev;
	struct se_cmd *ec_cmd = xop->xop_se_cmd;
	sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba;
	unsigned int max_sectors;
	int rc;
	unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0;

	end_lba = src_lba + nolb;
	/*
	 * Break up XCOPY I/O into hw_max_sectors-sized I/O based on the
	 * smaller of the src_dev + dst_dev hw_max_sectors, capped at
	 * XCOPY_MAX_SECTORS.
	 */
	max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
			  dst_dev->dev_attrib.hw_max_sectors);
	max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);

	max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));

	pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
			nolb, max_nolb, (unsigned long long)end_lba);
	pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
			(unsigned long long)src_lba, (unsigned long long)dst_lba);

	while (src_lba < end_lba) {
		cur_nolb = min(nolb, max_nolb);

		pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
			" cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);

		rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
		if (rc < 0)
			goto out;

		src_lba += cur_nolb;
		pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
				(unsigned long long)src_lba);

		pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
			" cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);

		rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
						dst_lba, cur_nolb);
		if (rc < 0) {
			transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
			goto out;
		}

		dst_lba += cur_nolb;
		pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
				(unsigned long long)dst_lba);

		copied_nolb += cur_nolb;
		nolb -= cur_nolb;

		transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
		xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;

		transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
	}

	xcopy_pt_undepend_remotedev(xop);
	kfree(xop);

	pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
		(unsigned long long)src_lba, (unsigned long long)dst_lba);
	pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
		copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);

	pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
	target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
	return;

out:
	xcopy_pt_undepend_remotedev(xop);
	kfree(xop);

	pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
	ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
}
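
/*
 * Entry point for EXTENDED_COPY (LID1) CDB emulation.  Validates the
 * 16-byte parameter list header (list id, TDLL, SDLL, and a zero inline
 * data length), parses the target and segment descriptors that follow,
 * and queues the actual copy to the xcopy workqueue.
 */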
sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
{
	struct se_device *dev = se_cmd->se_dev;
	struct xcopy_op *xop = NULL;
	unsigned char *p = NULL, *seg_desc;
	unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
	sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
	int rc;
	unsigned short tdll;

	if (!dev->dev_attrib.emulate_3pc) {
		pr_err("EXTENDED_COPY operation explicitly disabled\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	sa = se_cmd->t_task_cdb[1] & 0x1f;
	if (sa != 0x00) {
		pr_err("EXTENDED_COPY(LID4) not supported\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
	if (!xop) {
		pr_err("Unable to allocate xcopy_op\n");
		return TCM_OUT_OF_RESOURCES;
	}
	xop->xop_se_cmd = se_cmd;

	p = transport_kmap_data_sg(se_cmd);
	if (!p) {
		pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
		kfree(xop);
		return TCM_OUT_OF_RESOURCES;
	}

	list_id = p[0];
	list_id_usage = (p[1] & 0x18) >> 3;

	/*
	 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
	 */
	tdll = get_unaligned_be16(&p[2]);
	sdll = get_unaligned_be32(&p[8]);

	inline_dl = get_unaligned_be32(&p[12]);
	if (inline_dl != 0) {
		pr_err("XCOPY with non-zero inline data length\n");
		goto out;
	}

	pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
		" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
		tdll, sdll, inline_dl);

	rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
	if (rc <= 0)
		goto out;

	if (xop->src_dev->dev_attrib.block_size !=
	    xop->dst_dev->dev_attrib.block_size) {
		pr_err("XCOPY: Non-matching src_dev block_size: %u + dst_dev"
		       " block_size: %u currently unsupported\n",
		       xop->src_dev->dev_attrib.block_size,
		       xop->dst_dev->dev_attrib.block_size);
		xcopy_pt_undepend_remotedev(xop);
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto out;
	}

	pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
				rc * XCOPY_TARGET_DESC_LEN);
	seg_desc = &p[16];
	seg_desc += (rc * XCOPY_TARGET_DESC_LEN);

	rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
	if (rc <= 0) {
		xcopy_pt_undepend_remotedev(xop);
		goto out;
	}
	transport_kunmap_data_sg(se_cmd);

	pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
				rc * XCOPY_SEGMENT_DESC_LEN);
	INIT_WORK(&xop->xop_work, target_xcopy_do_work);
	queue_work(xcopy_wq, &xop->xop_work);
	return TCM_NO_SENSE;

out:
	if (p)
		transport_kunmap_data_sg(se_cmd);
	kfree(xop);
	return ret;
}
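
/*
 * Fill out the RECEIVE COPY RESULTS / OPERATING PARAMETERS payload,
 * advertising SNLID=1 plus the implemented limits and the two supported
 * descriptor type codes (0x02 block -> block, 0xE4 identification).
 */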
static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
{
	unsigned char *p;

	p = transport_kmap_data_sg(se_cmd);
	if (!p) {
		pr_err("transport_kmap_data_sg failed in"
		       " target_rcr_operating_parameters\n");
		return TCM_OUT_OF_RESOURCES;
	}

	if (se_cmd->data_length < 54) {
		pr_err("Receive Copy Results Op Parameters length"
		       " too small: %u\n", se_cmd->data_length);
		transport_kunmap_data_sg(se_cmd);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Set SNLID=1 (Supports no List ID)
	 */
	p[4] = 0x1;
	/*
	 * MAXIMUM TARGET DESCRIPTOR COUNT
	 */
	put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
	/*
	 * MAXIMUM SEGMENT DESCRIPTOR COUNT
	 */
	put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
	/*
	 * MAXIMUM DESCRIPTOR LIST LENGTH
	 */
	put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
	/*
	 * MAXIMUM SEGMENT LENGTH
	 */
	put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
	/*
	 * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
	 */
	put_unaligned_be32(0x0, &p[20]);
	/*
	 * HELD DATA LIMIT
	 */
	put_unaligned_be32(0x0, &p[24]);
	/*
	 * MAXIMUM STREAM DEVICE TRANSFER SIZE
	 */
	put_unaligned_be32(0x0, &p[28]);
	/*
	 * TOTAL CONCURRENT COPIES
	 */
	put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
	/*
	 * MAXIMUM CONCURRENT COPIES
	 */
	p[36] = RCR_OP_MAX_CONCURR_COPIES;
	/*
	 * DATA SEGMENT GRANULARITY (log 2)
	 */
	p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
	/*
	 * INLINE DATA GRANULARITY (log 2)
	 */
	p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
	/*
	 * HELD DATA GRANULARITY (log 2)
	 */
	p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
	/*
	 * IMPLEMENTED DESCRIPTOR LIST LENGTH
	 */
	p[43] = 0x2;
	/*
	 * List of implemented descriptor type codes (ordered)
	 */
	p[44] = 0x02; /* Copy Block to Block device */
	p[45] = 0xe4; /* Identification descriptor target descriptor */

	/*
	 * AVAILABLE DATA (n-3)
	 */
	put_unaligned_be32(42, &p[0]);

	transport_kunmap_data_sg(se_cmd);
	target_complete_cmd(se_cmd, GOOD);

	return TCM_NO_SENSE;
}
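
/*
 * Entry point for RECEIVE COPY RESULTS CDB emulation.  Only the
 * OPERATING PARAMETERS service action with a zero list identifier is
 * supported.
 */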
sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
{
	unsigned char *cdb = &se_cmd->t_task_cdb[0];
	int sa = (cdb[1] & 0x1f), list_id = cdb[2];
	sense_reason_t rc = TCM_NO_SENSE;

	pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
		" 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);

	if (list_id != 0) {
		pr_err("Receive Copy Results with non-zero list identifier"
			" not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	switch (sa) {
	case RCR_SA_OPERATING_PARAMETERS:
		rc = target_rcr_operating_parameters(se_cmd);
		break;
	case RCR_SA_COPY_STATUS:
	case RCR_SA_RECEIVE_DATA:
	case RCR_SA_FAILED_SEGMENT_DETAILS:
	default:
		pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
		return TCM_INVALID_CDB_FIELD;
	}

	return rc;
}