tcm_vhost: Use iov_num_pages to calculate sgl_count
drivers/vhost/tcm_vhost.c
1/*******************************************************************************
2 * Vhost kernel TCM fabric driver for virtio SCSI initiators
3 *
4 * (C) Copyright 2010-2012 RisingTide Systems LLC.
5 * (C) Copyright 2010-2012 IBM Corp.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 *
9 * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
10 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 ****************************************************************************/
23
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <generated/utsrelease.h>
27#include <linux/utsname.h>
28#include <linux/init.h>
29#include <linux/slab.h>
30#include <linux/kthread.h>
31#include <linux/types.h>
32#include <linux/string.h>
33#include <linux/configfs.h>
34#include <linux/ctype.h>
35#include <linux/compat.h>
36#include <linux/eventfd.h>
37#include <linux/fs.h>
38#include <linux/miscdevice.h>
39#include <asm/unaligned.h>
40#include <scsi/scsi.h>
41#include <scsi/scsi_tcq.h>
42#include <target/target_core_base.h>
43#include <target/target_core_fabric.h>
44#include <target/target_core_fabric_configfs.h>
45#include <target/target_core_configfs.h>
46#include <target/configfs_macros.h>
47#include <linux/vhost.h>
48#include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
49#include <linux/virtio_scsi.h>
 50#include <linux/llist.h>
51
52#include "vhost.c"
53#include "vhost.h"
54#include "tcm_vhost.h"
55
56enum {
57 VHOST_SCSI_VQ_CTL = 0,
58 VHOST_SCSI_VQ_EVT = 1,
59 VHOST_SCSI_VQ_IO = 2,
60};
61
 62struct vhost_scsi {
 63 struct tcm_vhost_tpg *vs_tpg; /* Protected by vhost_scsi->dev.mutex */
64 struct vhost_dev dev;
65 struct vhost_virtqueue vqs[3];
66
67 struct vhost_work vs_completion_work; /* cmd completion work item */
 68 struct llist_head vs_completion_list; /* cmd completion queue */
69};
70
71/* Local pointer to allocated TCM configfs fabric module */
72static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
73
74static struct workqueue_struct *tcm_vhost_workqueue;
75
76/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */
77static DEFINE_MUTEX(tcm_vhost_mutex);
78static LIST_HEAD(tcm_vhost_list);
79
80static int iov_num_pages(struct iovec *iov)
81{
82 return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
83 ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
84}
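
As a side note on the helper above, here is a minimal standalone sketch (not kernel code) of the same page-count arithmetic; the 4 KiB page size and the sample addresses are assumptions purely for illustration, while the kernel gets PAGE_SIZE/PAGE_MASK/PAGE_ALIGN from <asm/page.h>:

        #include <stdio.h>
        #include <sys/uio.h>

        #define PAGE_SHIFT    12
        #define PAGE_SIZE     (1UL << PAGE_SHIFT)
        #define PAGE_MASK     (~(PAGE_SIZE - 1))
        #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

        /* Same arithmetic as iov_num_pages() above: round the buffer end up to a
         * page boundary, round the start down, and count the pages in between. */
        static int iov_num_pages(const struct iovec *iov)
        {
                return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
                       ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
        }

        int main(void)
        {
                struct iovec a = { .iov_base = (void *)0x1000, .iov_len = 4096 }; /* exactly one page */
                struct iovec b = { .iov_base = (void *)0x1ff0, .iov_len = 32 };   /* straddles a page boundary */

                printf("a spans %d page(s), b spans %d page(s)\n",
                       iov_num_pages(&a), iov_num_pages(&b)); /* prints: 1, 2 */
                return 0;
        }

This is the per-iovec quantity the commit sums to size sgl_count in vhost_scsi_map_iov_to_sgl() further down.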
85
86static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
87{
88 return 1;
89}
90
91static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
92{
93 return 0;
94}
95
96static char *tcm_vhost_get_fabric_name(void)
97{
98 return "vhost";
99}
100
101static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
102{
103 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
104 struct tcm_vhost_tpg, se_tpg);
105 struct tcm_vhost_tport *tport = tpg->tport;
106
107 switch (tport->tport_proto_id) {
108 case SCSI_PROTOCOL_SAS:
109 return sas_get_fabric_proto_ident(se_tpg);
110 case SCSI_PROTOCOL_FCP:
111 return fc_get_fabric_proto_ident(se_tpg);
112 case SCSI_PROTOCOL_ISCSI:
113 return iscsi_get_fabric_proto_ident(se_tpg);
114 default:
115 pr_err("Unknown tport_proto_id: 0x%02x, using"
116 " SAS emulation\n", tport->tport_proto_id);
117 break;
118 }
119
120 return sas_get_fabric_proto_ident(se_tpg);
121}
122
123static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
124{
125 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
126 struct tcm_vhost_tpg, se_tpg);
127 struct tcm_vhost_tport *tport = tpg->tport;
128
129 return &tport->tport_name[0];
130}
131
132static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
133{
134 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
135 struct tcm_vhost_tpg, se_tpg);
136 return tpg->tport_tpgt;
137}
138
139static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
140{
141 return 1;
142}
143
 144static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
145 struct se_node_acl *se_nacl,
146 struct t10_pr_registration *pr_reg,
147 int *format_code,
148 unsigned char *buf)
149{
150 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
151 struct tcm_vhost_tpg, se_tpg);
152 struct tcm_vhost_tport *tport = tpg->tport;
153
154 switch (tport->tport_proto_id) {
155 case SCSI_PROTOCOL_SAS:
156 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
157 format_code, buf);
158 case SCSI_PROTOCOL_FCP:
159 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
160 format_code, buf);
161 case SCSI_PROTOCOL_ISCSI:
162 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
163 format_code, buf);
164 default:
165 pr_err("Unknown tport_proto_id: 0x%02x, using"
166 " SAS emulation\n", tport->tport_proto_id);
167 break;
168 }
169
170 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
171 format_code, buf);
172}
173
 174static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
175 struct se_node_acl *se_nacl,
176 struct t10_pr_registration *pr_reg,
177 int *format_code)
178{
179 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
180 struct tcm_vhost_tpg, se_tpg);
181 struct tcm_vhost_tport *tport = tpg->tport;
182
183 switch (tport->tport_proto_id) {
184 case SCSI_PROTOCOL_SAS:
185 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
186 format_code);
187 case SCSI_PROTOCOL_FCP:
188 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
189 format_code);
190 case SCSI_PROTOCOL_ISCSI:
191 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
192 format_code);
193 default:
194 pr_err("Unknown tport_proto_id: 0x%02x, using"
195 " SAS emulation\n", tport->tport_proto_id);
196 break;
197 }
198
199 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
200 format_code);
201}
202
 203static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
204 const char *buf,
205 u32 *out_tid_len,
206 char **port_nexus_ptr)
207{
208 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
209 struct tcm_vhost_tpg, se_tpg);
210 struct tcm_vhost_tport *tport = tpg->tport;
211
212 switch (tport->tport_proto_id) {
213 case SCSI_PROTOCOL_SAS:
214 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
215 port_nexus_ptr);
216 case SCSI_PROTOCOL_FCP:
217 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
218 port_nexus_ptr);
219 case SCSI_PROTOCOL_ISCSI:
220 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
221 port_nexus_ptr);
222 default:
223 pr_err("Unknown tport_proto_id: 0x%02x, using"
224 " SAS emulation\n", tport->tport_proto_id);
225 break;
226 }
227
228 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
229 port_nexus_ptr);
230}
231
232static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
233 struct se_portal_group *se_tpg)
234{
235 struct tcm_vhost_nacl *nacl;
236
237 nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
238 if (!nacl) {
 239 pr_err("Unable to allocate struct tcm_vhost_nacl\n");
240 return NULL;
241 }
242
243 return &nacl->se_node_acl;
244}
245
 246static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
247 struct se_node_acl *se_nacl)
248{
249 struct tcm_vhost_nacl *nacl = container_of(se_nacl,
250 struct tcm_vhost_nacl, se_node_acl);
251 kfree(nacl);
252}
253
254static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
255{
256 return 1;
257}
258
259static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
260{
261 return;
262}
263
264static int tcm_vhost_shutdown_session(struct se_session *se_sess)
265{
266 return 0;
267}
268
269static void tcm_vhost_close_session(struct se_session *se_sess)
270{
271 return;
272}
273
274static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
275{
276 return 0;
277}
278
279static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
280{
281 /* Go ahead and process the write immediately */
282 target_execute_cmd(se_cmd);
283 return 0;
284}
285
286static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
287{
288 return 0;
289}
290
291static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
292{
293 return;
294}
295
296static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
297{
298 return 0;
299}
300
301static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
302{
303 return 0;
304}
305
306static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
307{
308 struct vhost_scsi *vs = tv_cmd->tvc_vhost;
309
 310 llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
311
312 vhost_work_queue(&vs->dev, &vs->vs_completion_work);
313}
314
315static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
316{
317 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
318 struct tcm_vhost_cmd, tvc_se_cmd);
319 vhost_scsi_complete_cmd(tv_cmd);
320 return 0;
321}
322
323static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
324{
325 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
326 struct tcm_vhost_cmd, tvc_se_cmd);
327 vhost_scsi_complete_cmd(tv_cmd);
328 return 0;
329}
330
331static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
332{
333 return 0;
334}
335
336static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
337{
338 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
339
340 /* TODO locking against target/backend threads? */
341 transport_generic_free_cmd(se_cmd, 1);
342
343 if (tv_cmd->tvc_sgl_count) {
344 u32 i;
345 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
346 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
347
348 kfree(tv_cmd->tvc_sgl);
349 }
350
351 kfree(tv_cmd);
352}
353
354/* Fill in status and signal that we are done processing this command
355 *
356 * This is scheduled in the vhost work queue so we are called with the owner
357 * process mm and can access the vring.
358 */
359static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
360{
361 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
362 vs_completion_work);
 363 struct virtio_scsi_cmd_resp v_rsp;
 364 struct tcm_vhost_cmd *tv_cmd;
365 struct llist_node *llnode;
366 struct se_cmd *se_cmd;
367 int ret;
 368
369 llnode = llist_del_all(&vs->vs_completion_list);
370 while (llnode) {
371 tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
372 tvc_completion_list);
373 llnode = llist_next(llnode);
374 se_cmd = &tv_cmd->tvc_se_cmd;
375
376 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
377 tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
378
379 memset(&v_rsp, 0, sizeof(v_rsp));
380 v_rsp.resid = se_cmd->residual_count;
381 /* TODO is status_qualifier field needed? */
382 v_rsp.status = se_cmd->scsi_status;
383 v_rsp.sense_len = se_cmd->scsi_sense_length;
384 memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
385 v_rsp.sense_len);
386 ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
387 if (likely(ret == 0))
388 vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0);
389 else
390 pr_err("Faulted on virtio_scsi_cmd_resp\n");
391
392 vhost_scsi_free_cmd(tv_cmd);
393 }
394
395 vhost_signal(&vs->dev, &vs->vqs[2]);
396}
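
The completion hand-off used above follows a simple lock-free pattern: producers push finished commands with llist_add() from target completion context, and the single vhost worker detaches the whole list at once with llist_del_all(). A condensed kernel-context sketch of just that pattern (not standalone code; the demo_* names are illustrative, only the llist primitives are real):

        #include <linux/llist.h>

        struct demo_cmd {
                struct llist_node node;
                int status;
        };

        static LLIST_HEAD(demo_completions);

        /* Producer side: callable from the target completion callbacks. */
        static void demo_complete(struct demo_cmd *cmd)
        {
                llist_add(&cmd->node, &demo_completions);
                /* ...then kick the worker, as vhost_work_queue() does above. */
        }

        /* Consumer side: runs in the vhost worker, drains everything queued so far. */
        static void demo_drain(void)
        {
                struct llist_node *llnode = llist_del_all(&demo_completions);

                while (llnode) {
                        struct demo_cmd *cmd = llist_entry(llnode, struct demo_cmd, node);

                        llnode = llist_next(llnode);
                        /* report cmd->status back to the guest, then free cmd */
                }
        }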
397
398static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
399 struct tcm_vhost_tpg *tv_tpg,
400 struct virtio_scsi_cmd_req *v_req,
401 u32 exp_data_len,
402 int data_direction)
403{
404 struct tcm_vhost_cmd *tv_cmd;
405 struct tcm_vhost_nexus *tv_nexus;
406
407 tv_nexus = tv_tpg->tpg_nexus;
408 if (!tv_nexus) {
409 pr_err("Unable to locate active struct tcm_vhost_nexus\n");
410 return ERR_PTR(-EIO);
411 }
412
413 tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
414 if (!tv_cmd) {
415 pr_err("Unable to allocate struct tcm_vhost_cmd\n");
416 return ERR_PTR(-ENOMEM);
417 }
 418 tv_cmd->tvc_tag = v_req->tag;
419 tv_cmd->tvc_task_attr = v_req->task_attr;
420 tv_cmd->tvc_exp_data_len = exp_data_len;
421 tv_cmd->tvc_data_direction = data_direction;
422 tv_cmd->tvc_nexus = tv_nexus;
 423
424 return tv_cmd;
425}
426
427/*
428 * Map a user memory range into a scatterlist
429 *
430 * Returns the number of scatterlist entries used or -errno on error.
431 */
432static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
433 unsigned int sgl_count, void __user *ptr, size_t len, int write)
434{
435 struct scatterlist *sg = sgl;
436 unsigned int npages = 0;
437 int ret;
438
439 while (len > 0) {
440 struct page *page;
441 unsigned int offset = (uintptr_t)ptr & ~PAGE_MASK;
442 unsigned int nbytes = min_t(unsigned int,
443 PAGE_SIZE - offset, len);
444
445 if (npages == sgl_count) {
446 ret = -ENOBUFS;
447 goto err;
448 }
449
450 ret = get_user_pages_fast((unsigned long)ptr, 1, write, &page);
451 BUG_ON(ret == 0); /* we should either get our page or fail */
452 if (ret < 0)
453 goto err;
454
455 sg_set_page(sg, page, nbytes, offset);
456 ptr += nbytes;
457 len -= nbytes;
458 sg++;
459 npages++;
460 }
461 return npages;
462
463err:
464 /* Put pages that we hold */
465 for (sg = sgl; sg != &sgl[npages]; sg++)
466 put_page(sg_page(sg));
467 return ret;
468}
469
470static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
471 struct iovec *iov, unsigned int niov, int write)
472{
473 int ret;
474 unsigned int i;
475 u32 sgl_count;
476 struct scatterlist *sg;
477
478 /*
479 * Find out how long sglist needs to be
480 */
481 sgl_count = 0;
482 for (i = 0; i < niov; i++)
483 sgl_count += iov_num_pages(&iov[i]);
484
485 /* TODO overflow checking */
486
487 sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
488 if (!sg)
489 return -ENOMEM;
490 pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
491 sg, sgl_count, !sg);
492 sg_init_table(sg, sgl_count);
493
494 tv_cmd->tvc_sgl = sg;
495 tv_cmd->tvc_sgl_count = sgl_count;
496
497 pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
498 for (i = 0; i < niov; i++) {
499 ret = vhost_scsi_map_to_sgl(sg, sgl_count, iov[i].iov_base,
500 iov[i].iov_len, write);
501 if (ret < 0) {
502 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
503 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
504 kfree(tv_cmd->tvc_sgl);
505 tv_cmd->tvc_sgl = NULL;
506 tv_cmd->tvc_sgl_count = 0;
507 return ret;
508 }
509
510 sg += ret;
511 sgl_count -= ret;
512 }
513 return 0;
514}
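
The allocation in this function still carries a "TODO overflow checking" comment, since sgl_count is derived from guest-supplied iovecs. As a hedged aside (not the in-tree fix), one possible sketch is to let kcalloc() perform the checked element-count multiplication; it also zeroes the buffer, which sg_init_table() would redo anyway:

        /* Hypothetical variant of the kmalloc() above, addressing the TODO:
         * kcalloc() returns NULL if sgl_count * sizeof(element) overflows. */
        sg = kcalloc(sgl_count, sizeof(tv_cmd->tvc_sgl[0]), GFP_ATOMIC);
        if (!sg)
                return -ENOMEM;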
515
516static void tcm_vhost_submission_work(struct work_struct *work)
517{
518 struct tcm_vhost_cmd *tv_cmd =
519 container_of(work, struct tcm_vhost_cmd, work);
 520 struct tcm_vhost_nexus *tv_nexus;
521 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
522 struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
523 int rc, sg_no_bidi = 0;
524
525 if (tv_cmd->tvc_sgl_count) {
526 sg_ptr = tv_cmd->tvc_sgl;
527/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
528#if 0
529 if (se_cmd->se_cmd_flags & SCF_BIDI) {
530 sg_bidi_ptr = NULL;
531 sg_no_bidi = 0;
532 }
533#endif
534 } else {
535 sg_ptr = NULL;
536 }
537 tv_nexus = tv_cmd->tvc_nexus;
538
539 rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
540 tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
541 tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
542 tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
543 0, sg_ptr, tv_cmd->tvc_sgl_count,
544 sg_bidi_ptr, sg_no_bidi);
545 if (rc < 0) {
546 transport_send_check_condition_and_sense(se_cmd,
 547 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
 548 transport_generic_free_cmd(se_cmd, 0);
 549 }
550}
551
552static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
553{
554 struct vhost_virtqueue *vq = &vs->vqs[2];
555 struct virtio_scsi_cmd_req v_req;
556 struct tcm_vhost_tpg *tv_tpg;
557 struct tcm_vhost_cmd *tv_cmd;
558 u32 exp_data_len, data_first, data_num, data_direction;
559 unsigned out, in, i;
560 int head, ret;
561
562 /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
563 tv_tpg = vs->vs_tpg;
 564 if (unlikely(!tv_tpg))
 565 return;
566
567 mutex_lock(&vq->mutex);
568 vhost_disable_notify(&vs->dev, vq);
569
570 for (;;) {
571 head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
572 ARRAY_SIZE(vq->iov), &out, &in,
573 NULL, NULL);
574 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
575 head, out, in);
576 /* On error, stop handling until the next kick. */
577 if (unlikely(head < 0))
578 break;
579 /* Nothing new? Wait for eventfd to tell us they refilled. */
580 if (head == vq->num) {
581 if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
582 vhost_disable_notify(&vs->dev, vq);
583 continue;
584 }
585 break;
586 }
587
588/* FIXME: BIDI operation */
589 if (out == 1 && in == 1) {
590 data_direction = DMA_NONE;
591 data_first = 0;
592 data_num = 0;
593 } else if (out == 1 && in > 1) {
594 data_direction = DMA_FROM_DEVICE;
595 data_first = out + 1;
596 data_num = in - 1;
597 } else if (out > 1 && in == 1) {
598 data_direction = DMA_TO_DEVICE;
599 data_first = 1;
600 data_num = out - 1;
601 } else {
602 vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
603 out, in);
604 break;
605 }
606
607 /*
608 * Check for a sane resp buffer so we can report errors to
609 * the guest.
610 */
611 if (unlikely(vq->iov[out].iov_len !=
612 sizeof(struct virtio_scsi_cmd_resp))) {
613 vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
614 " bytes\n", vq->iov[out].iov_len);
615 break;
616 }
617
618 if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
619 vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
620 " bytes\n", vq->iov[0].iov_len);
621 break;
622 }
623 pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
624 " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
625 ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
626 sizeof(v_req));
627 if (unlikely(ret)) {
628 vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
629 break;
630 }
631
632 exp_data_len = 0;
633 for (i = 0; i < data_num; i++)
634 exp_data_len += vq->iov[data_first + i].iov_len;
635
636 tv_cmd = vhost_scsi_allocate_cmd(tv_tpg, &v_req,
637 exp_data_len, data_direction);
638 if (IS_ERR(tv_cmd)) {
639 vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
640 PTR_ERR(tv_cmd));
641 break;
642 }
643 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
644 ": %d\n", tv_cmd, exp_data_len, data_direction);
645
646 tv_cmd->tvc_vhost = vs;
647
648 if (unlikely(vq->iov[out].iov_len !=
649 sizeof(struct virtio_scsi_cmd_resp))) {
650 vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
651 " bytes, out: %d, in: %d\n",
652 vq->iov[out].iov_len, out, in);
653 break;
654 }
655
656 tv_cmd->tvc_resp = vq->iov[out].iov_base;
657
658 /*
 659 * Copy in the received CDB descriptor into tv_cmd->tvc_cdb
660 * that will be used by tcm_vhost_new_cmd_map() and down into
661 * target_setup_cmd_from_cdb()
662 */
663 memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
664 /*
 665 * Check that the received CDB size does not exceed our
666 * hardcoded max for tcm_vhost
667 */
668 /* TODO what if cdb was too small for varlen cdb header? */
669 if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
670 TCM_VHOST_MAX_CDB_SIZE)) {
671 vq_err(vq, "Received SCSI CDB with command_size: %d that"
672 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
673 scsi_command_size(tv_cmd->tvc_cdb),
674 TCM_VHOST_MAX_CDB_SIZE);
675 break; /* TODO */
676 }
677 tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
678
679 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
680 tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);
681
682 if (data_direction != DMA_NONE) {
683 ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
684 &vq->iov[data_first], data_num,
685 data_direction == DMA_TO_DEVICE);
686 if (unlikely(ret)) {
687 vq_err(vq, "Failed to map iov to sgl\n");
688 break; /* TODO */
689 }
690 }
691
692 /*
693 * Save the descriptor from vhost_get_vq_desc() to be used to
694 * complete the virtio-scsi request in TCM callback context via
695 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
696 */
697 tv_cmd->tvc_vq_desc = head;
698 /*
699 * Dispatch tv_cmd descriptor for cmwq execution in process
700 * context provided by tcm_vhost_workqueue. This also ensures
701 * tv_cmd is executed on the same kworker CPU as this vhost
 702 * thread to gain positive L2 cache locality effects.
703 */
704 INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
705 queue_work(tcm_vhost_workqueue, &tv_cmd->work);
706 }
707
708 mutex_unlock(&vq->mutex);
709}
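
Two small decisions made in the request loop above are easy to check in isolation: the data direction is inferred from the out/in descriptor counts returned by vhost_get_vq_desc(), and the LUN is the 14-bit value carried in bytes 2-3 of the virtio-scsi 8-byte LUN field. A standalone sketch of both calculations (not kernel code; the sample values are made up):

        #include <stdio.h>

        enum dma_dir { DMA_NONE, DMA_FROM_DEVICE, DMA_TO_DEVICE, DMA_INVALID };

        static enum dma_dir direction(unsigned out, unsigned in)
        {
                if (out == 1 && in == 1)
                        return DMA_NONE;        /* request header + response only     */
                if (out == 1 && in > 1)
                        return DMA_FROM_DEVICE; /* extra device-writable data buffers */
                if (out > 1 && in == 1)
                        return DMA_TO_DEVICE;   /* extra driver-written data buffers  */
                return DMA_INVALID;             /* the vq_err() case in the driver    */
        }

        int main(void)
        {
                unsigned char lun[8] = { 1, 0, 0x40, 5, 0, 0, 0, 0 }; /* encodes LUN 5 */

                printf("direction(out=4, in=1) = %d\n", direction(4, 1));
                printf("decoded lun = %u\n",
                       (unsigned)(((lun[2] << 8) | lun[3]) & 0x3FFF)); /* prints 5 */
                return 0;
        }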
710
711static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
712{
 713 pr_debug("%s: The handling func for control queue.\n", __func__);
714}
715
716static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
717{
 718 pr_debug("%s: The handling func for event queue.\n", __func__);
719}
720
721static void vhost_scsi_handle_kick(struct vhost_work *work)
722{
723 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
724 poll.work);
725 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
726
727 vhost_scsi_handle_vq(vs);
728}
729
730/*
731 * Called from vhost_scsi_ioctl() context to walk the list of available
732 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
733 */
734static int vhost_scsi_set_endpoint(
735 struct vhost_scsi *vs,
736 struct vhost_scsi_target *t)
737{
738 struct tcm_vhost_tport *tv_tport;
739 struct tcm_vhost_tpg *tv_tpg;
740 int index;
741
742 mutex_lock(&vs->dev.mutex);
743 /* Verify that ring has been setup correctly. */
744 for (index = 0; index < vs->dev.nvqs; ++index) {
745 /* Verify that ring has been setup correctly. */
746 if (!vhost_vq_access_ok(&vs->vqs[index])) {
747 mutex_unlock(&vs->dev.mutex);
748 return -EFAULT;
749 }
750 }
751 mutex_unlock(&vs->dev.mutex);
752
753 mutex_lock(&tcm_vhost_mutex);
754 list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
755 mutex_lock(&tv_tpg->tv_tpg_mutex);
756 if (!tv_tpg->tpg_nexus) {
757 mutex_unlock(&tv_tpg->tv_tpg_mutex);
758 continue;
759 }
 760 if (tv_tpg->tv_tpg_vhost_count != 0) {
761 mutex_unlock(&tv_tpg->tv_tpg_mutex);
762 continue;
763 }
764 tv_tport = tv_tpg->tport;
765
766 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&
767 (tv_tpg->tport_tpgt == t->vhost_tpgt)) {
 768 tv_tpg->tv_tpg_vhost_count++;
769 mutex_unlock(&tv_tpg->tv_tpg_mutex);
770 mutex_unlock(&tcm_vhost_mutex);
771
772 mutex_lock(&vs->dev.mutex);
773 if (vs->vs_tpg) {
774 mutex_unlock(&vs->dev.mutex);
775 mutex_lock(&tv_tpg->tv_tpg_mutex);
776 tv_tpg->tv_tpg_vhost_count--;
777 mutex_unlock(&tv_tpg->tv_tpg_mutex);
778 return -EEXIST;
779 }
780
 781 vs->vs_tpg = tv_tpg;
782 smp_mb__after_atomic_inc();
783 mutex_unlock(&vs->dev.mutex);
784 return 0;
785 }
786 mutex_unlock(&tv_tpg->tv_tpg_mutex);
787 }
788 mutex_unlock(&tcm_vhost_mutex);
789 return -EINVAL;
790}
791
792static int vhost_scsi_clear_endpoint(
793 struct vhost_scsi *vs,
794 struct vhost_scsi_target *t)
795{
796 struct tcm_vhost_tport *tv_tport;
797 struct tcm_vhost_tpg *tv_tpg;
 798 int index, ret;
799
800 mutex_lock(&vs->dev.mutex);
801 /* Verify that ring has been setup correctly. */
802 for (index = 0; index < vs->dev.nvqs; ++index) {
803 if (!vhost_vq_access_ok(&vs->vqs[index])) {
804 ret = -EFAULT;
805 goto err;
806 }
807 }
808
809 if (!vs->vs_tpg) {
810 ret = -ENODEV;
811 goto err;
812 }
813 tv_tpg = vs->vs_tpg;
814 tv_tport = tv_tpg->tport;
815
816 if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||
817 (tv_tpg->tport_tpgt != t->vhost_tpgt)) {
818 pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
819 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
820 tv_tport->tport_name, tv_tpg->tport_tpgt,
821 t->vhost_wwpn, t->vhost_tpgt);
822 ret = -EINVAL;
823 goto err;
 824 }
 825 tv_tpg->tv_tpg_vhost_count--;
826 vs->vs_tpg = NULL;
827 mutex_unlock(&vs->dev.mutex);
828
829 return 0;
830
831err:
832 mutex_unlock(&vs->dev.mutex);
833 return ret;
834}
835
836static int vhost_scsi_open(struct inode *inode, struct file *f)
837{
838 struct vhost_scsi *s;
839 int r;
840
841 s = kzalloc(sizeof(*s), GFP_KERNEL);
842 if (!s)
843 return -ENOMEM;
844
845 vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
 846
847 s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
848 s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
849 s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick;
850 r = vhost_dev_init(&s->dev, s->vqs, 3);
851 if (r < 0) {
852 kfree(s);
853 return r;
854 }
855
856 f->private_data = s;
857 return 0;
858}
859
860static int vhost_scsi_release(struct inode *inode, struct file *f)
861{
862 struct vhost_scsi *s = f->private_data;
863
864 if (s->vs_tpg && s->vs_tpg->tport) {
865 struct vhost_scsi_target backend;
866
867 memcpy(backend.vhost_wwpn, s->vs_tpg->tport->tport_name,
868 sizeof(backend.vhost_wwpn));
869 backend.vhost_tpgt = s->vs_tpg->tport_tpgt;
870 vhost_scsi_clear_endpoint(s, &backend);
871 }
872
 873 vhost_dev_stop(&s->dev);
874 vhost_dev_cleanup(&s->dev, false);
875 kfree(s);
876 return 0;
877}
878
879static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
880{
881 vhost_poll_flush(&vs->dev.vqs[index].poll);
882}
883
884static void vhost_scsi_flush(struct vhost_scsi *vs)
885{
886 vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL);
887 vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT);
888 vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO);
889}
890
891static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
892{
893 if (features & ~VHOST_FEATURES)
894 return -EOPNOTSUPP;
895
896 mutex_lock(&vs->dev.mutex);
897 if ((features & (1 << VHOST_F_LOG_ALL)) &&
898 !vhost_log_access_ok(&vs->dev)) {
899 mutex_unlock(&vs->dev.mutex);
900 return -EFAULT;
901 }
902 vs->dev.acked_features = features;
903 smp_wmb();
904 vhost_scsi_flush(vs);
905 mutex_unlock(&vs->dev.mutex);
906 return 0;
907}
908
909static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
910 unsigned long arg)
911{
912 struct vhost_scsi *vs = f->private_data;
913 struct vhost_scsi_target backend;
914 void __user *argp = (void __user *)arg;
915 u64 __user *featurep = argp;
916 u64 features;
 917 int r, abi_version = VHOST_SCSI_ABI_VERSION;
918
919 switch (ioctl) {
920 case VHOST_SCSI_SET_ENDPOINT:
921 if (copy_from_user(&backend, argp, sizeof backend))
922 return -EFAULT;
923 if (backend.reserved != 0)
924 return -EOPNOTSUPP;
925
926 return vhost_scsi_set_endpoint(vs, &backend);
927 case VHOST_SCSI_CLEAR_ENDPOINT:
928 if (copy_from_user(&backend, argp, sizeof backend))
929 return -EFAULT;
930 if (backend.reserved != 0)
931 return -EOPNOTSUPP;
932
933 return vhost_scsi_clear_endpoint(vs, &backend);
934 case VHOST_SCSI_GET_ABI_VERSION:
 935 if (copy_to_user(argp, &abi_version, sizeof abi_version))
936 return -EFAULT;
937 return 0;
938 case VHOST_GET_FEATURES:
939 features = VHOST_FEATURES;
940 if (copy_to_user(featurep, &features, sizeof features))
941 return -EFAULT;
942 return 0;
943 case VHOST_SET_FEATURES:
944 if (copy_from_user(&features, featurep, sizeof features))
945 return -EFAULT;
946 return vhost_scsi_set_features(vs, features);
947 default:
948 mutex_lock(&vs->dev.mutex);
949 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
950 /* TODO: flush backend after dev ioctl. */
951 if (r == -ENOIOCTLCMD)
952 r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
953 mutex_unlock(&vs->dev.mutex);
954 return r;
955 }
956}
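
For orientation, this ioctl handler is what a userspace VMM such as QEMU drives through the /dev/vhost-scsi misc device registered below. A hypothetical sketch of a minimal caller is shown here; it assumes the VHOST_SCSI_* ioctl numbers and struct vhost_scsi_target from tcm_vhost.h are visible to userspace, that the WWPN/TPGT match a target already configured via configfs, and it omits the VHOST_SET_OWNER and memory/vring setup a real caller performs first through the generic vhost ioctls:

        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <unistd.h>
        #include "tcm_vhost.h"  /* VHOST_SCSI_* ioctls, struct vhost_scsi_target */

        int main(void)
        {
                struct vhost_scsi_target backend;
                int abi = 0;
                int fd = open("/dev/vhost-scsi", O_RDWR);

                if (fd < 0)
                        return 1;

                if (ioctl(fd, VHOST_SCSI_GET_ABI_VERSION, &abi) == 0)
                        printf("vhost-scsi ABI version %d\n", abi);

                /* A real caller would issue VHOST_SET_OWNER and vring setup here. */
                memset(&backend, 0, sizeof(backend));
                strncpy(backend.vhost_wwpn, "naa.600140554cf3a18e", /* example WWPN */
                        sizeof(backend.vhost_wwpn) - 1);
                backend.vhost_tpgt = 1;

                if (ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &backend) < 0)
                        perror("VHOST_SCSI_SET_ENDPOINT");

                close(fd);
                return 0;
        }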
957
958#ifdef CONFIG_COMPAT
959static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
960 unsigned long arg)
961{
962 return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
963}
964#endif
965
966static const struct file_operations vhost_scsi_fops = {
967 .owner = THIS_MODULE,
968 .release = vhost_scsi_release,
969 .unlocked_ioctl = vhost_scsi_ioctl,
970#ifdef CONFIG_COMPAT
971 .compat_ioctl = vhost_scsi_compat_ioctl,
972#endif
973 .open = vhost_scsi_open,
974 .llseek = noop_llseek,
975};
976
977static struct miscdevice vhost_scsi_misc = {
978 MISC_DYNAMIC_MINOR,
979 "vhost-scsi",
980 &vhost_scsi_fops,
981};
982
983static int __init vhost_scsi_register(void)
984{
985 return misc_register(&vhost_scsi_misc);
986}
987
988static int vhost_scsi_deregister(void)
989{
990 return misc_deregister(&vhost_scsi_misc);
991}
992
993static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
994{
995 switch (tport->tport_proto_id) {
996 case SCSI_PROTOCOL_SAS:
997 return "SAS";
998 case SCSI_PROTOCOL_FCP:
999 return "FCP";
1000 case SCSI_PROTOCOL_ISCSI:
1001 return "iSCSI";
1002 default:
1003 break;
1004 }
1005
1006 return "Unknown";
1007}
1008
 1009static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1010 struct se_lun *lun)
1011{
1012 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1013 struct tcm_vhost_tpg, se_tpg);
1014
1015 mutex_lock(&tv_tpg->tv_tpg_mutex);
1016 tv_tpg->tv_tpg_port_count++;
1017 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1018
1019 return 0;
1020}
1021
 1022static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1023 struct se_lun *se_lun)
1024{
1025 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1026 struct tcm_vhost_tpg, se_tpg);
1027
1028 mutex_lock(&tv_tpg->tv_tpg_mutex);
1029 tv_tpg->tv_tpg_port_count--;
1030 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1031}
1032
1033static struct se_node_acl *tcm_vhost_make_nodeacl(
1034 struct se_portal_group *se_tpg,
1035 struct config_group *group,
1036 const char *name)
1037{
1038 struct se_node_acl *se_nacl, *se_nacl_new;
1039 struct tcm_vhost_nacl *nacl;
1040 u64 wwpn = 0;
1041 u32 nexus_depth;
1042
1043 /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1044 return ERR_PTR(-EINVAL); */
1045 se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1046 if (!se_nacl_new)
1047 return ERR_PTR(-ENOMEM);
1048
1049 nexus_depth = 1;
1050 /*
1051 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
 1052 * when converting a NodeACL from demo mode -> explicit
1053 */
1054 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1055 name, nexus_depth);
1056 if (IS_ERR(se_nacl)) {
1057 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1058 return se_nacl;
1059 }
1060 /*
1061 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1062 */
1063 nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1064 nacl->iport_wwpn = wwpn;
1065
1066 return se_nacl;
1067}
1068
1069static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1070{
1071 struct tcm_vhost_nacl *nacl = container_of(se_acl,
1072 struct tcm_vhost_nacl, se_node_acl);
1073 core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1074 kfree(nacl);
1075}
1076
 1077static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
1078 const char *name)
1079{
1080 struct se_portal_group *se_tpg;
1081 struct tcm_vhost_nexus *tv_nexus;
1082
1083 mutex_lock(&tv_tpg->tv_tpg_mutex);
1084 if (tv_tpg->tpg_nexus) {
1085 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1086 pr_debug("tv_tpg->tpg_nexus already exists\n");
1087 return -EEXIST;
1088 }
1089 se_tpg = &tv_tpg->se_tpg;
1090
1091 tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1092 if (!tv_nexus) {
1093 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1094 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1095 return -ENOMEM;
1096 }
1097 /*
1098 * Initialize the struct se_session pointer
1099 */
1100 tv_nexus->tvn_se_sess = transport_init_session();
1101 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1102 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1103 kfree(tv_nexus);
1104 return -ENOMEM;
1105 }
1106 /*
 1107 * Since we are running in 'demo mode' this call will generate a
1108 * struct se_node_acl for the tcm_vhost struct se_portal_group with
1109 * the SCSI Initiator port name of the passed configfs group 'name'.
1110 */
1111 tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1112 se_tpg, (unsigned char *)name);
1113 if (!tv_nexus->tvn_se_sess->se_node_acl) {
1114 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1115 pr_debug("core_tpg_check_initiator_node_acl() failed"
1116 " for %s\n", name);
1117 transport_free_session(tv_nexus->tvn_se_sess);
1118 kfree(tv_nexus);
1119 return -ENOMEM;
1120 }
1121 /*
 1122 * Now register the TCM vhost virtual I_T Nexus as active with the
1123 * call to __transport_register_session()
1124 */
1125 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1126 tv_nexus->tvn_se_sess, tv_nexus);
1127 tv_tpg->tpg_nexus = tv_nexus;
1128
1129 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1130 return 0;
1131}
1132
 1133static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1134{
1135 struct se_session *se_sess;
1136 struct tcm_vhost_nexus *tv_nexus;
1137
1138 mutex_lock(&tpg->tv_tpg_mutex);
1139 tv_nexus = tpg->tpg_nexus;
1140 if (!tv_nexus) {
1141 mutex_unlock(&tpg->tv_tpg_mutex);
1142 return -ENODEV;
1143 }
1144
1145 se_sess = tv_nexus->tvn_se_sess;
1146 if (!se_sess) {
1147 mutex_unlock(&tpg->tv_tpg_mutex);
1148 return -ENODEV;
1149 }
1150
 1151 if (tpg->tv_tpg_port_count != 0) {
 1152 mutex_unlock(&tpg->tv_tpg_mutex);
 1153 pr_err("Unable to remove TCM_vhost I_T Nexus with"
 1154 " active TPG port count: %d\n",
1155 tpg->tv_tpg_port_count);
1156 return -EBUSY;
1157 }
1158
 1159 if (tpg->tv_tpg_vhost_count != 0) {
 1160 mutex_unlock(&tpg->tv_tpg_mutex);
 1161 pr_err("Unable to remove TCM_vhost I_T Nexus with"
 1162 " active TPG vhost count: %d\n",
1163 tpg->tv_tpg_vhost_count);
1164 return -EBUSY;
1165 }
1166
 1167 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1168 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1169 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1170 /*
 1171 * Release the SCSI I_T Nexus to the emulated vhost Target Port
1172 */
1173 transport_deregister_session(tv_nexus->tvn_se_sess);
1174 tpg->tpg_nexus = NULL;
1175 mutex_unlock(&tpg->tv_tpg_mutex);
1176
1177 kfree(tv_nexus);
1178 return 0;
1179}
1180
 1181static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
1182 char *page)
1183{
1184 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1185 struct tcm_vhost_tpg, se_tpg);
1186 struct tcm_vhost_nexus *tv_nexus;
1187 ssize_t ret;
1188
1189 mutex_lock(&tv_tpg->tv_tpg_mutex);
1190 tv_nexus = tv_tpg->tpg_nexus;
1191 if (!tv_nexus) {
1192 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1193 return -ENODEV;
1194 }
1195 ret = snprintf(page, PAGE_SIZE, "%s\n",
1196 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1197 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1198
1199 return ret;
1200}
1201
 1202static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
1203 const char *page,
1204 size_t count)
1205{
1206 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1207 struct tcm_vhost_tpg, se_tpg);
1208 struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
1209 unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
1210 int ret;
1211 /*
1212 * Shutdown the active I_T nexus if 'NULL' is passed..
1213 */
1214 if (!strncmp(page, "NULL", 4)) {
1215 ret = tcm_vhost_drop_nexus(tv_tpg);
1216 return (!ret) ? count : ret;
1217 }
1218 /*
1219 * Otherwise make sure the passed virtual Initiator port WWN matches
1220 * the fabric protocol_id set in tcm_vhost_make_tport(), and call
1221 * tcm_vhost_make_nexus().
1222 */
1223 if (strlen(page) >= TCM_VHOST_NAMELEN) {
1224 pr_err("Emulated NAA Sas Address: %s, exceeds"
1225 " max: %d\n", page, TCM_VHOST_NAMELEN);
1226 return -EINVAL;
1227 }
1228 snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
1229
1230 ptr = strstr(i_port, "naa.");
1231 if (ptr) {
1232 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1233 pr_err("Passed SAS Initiator Port %s does not"
1234 " match target port protoid: %s\n", i_port,
1235 tcm_vhost_dump_proto_id(tport_wwn));
1236 return -EINVAL;
1237 }
1238 port_ptr = &i_port[0];
1239 goto check_newline;
1240 }
1241 ptr = strstr(i_port, "fc.");
1242 if (ptr) {
1243 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1244 pr_err("Passed FCP Initiator Port %s does not"
1245 " match target port protoid: %s\n", i_port,
1246 tcm_vhost_dump_proto_id(tport_wwn));
1247 return -EINVAL;
1248 }
1249 port_ptr = &i_port[3]; /* Skip over "fc." */
1250 goto check_newline;
1251 }
1252 ptr = strstr(i_port, "iqn.");
1253 if (ptr) {
1254 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1255 pr_err("Passed iSCSI Initiator Port %s does not"
1256 " match target port protoid: %s\n", i_port,
1257 tcm_vhost_dump_proto_id(tport_wwn));
1258 return -EINVAL;
1259 }
1260 port_ptr = &i_port[0];
1261 goto check_newline;
1262 }
1263 pr_err("Unable to locate prefix for emulated Initiator Port:"
1264 " %s\n", i_port);
1265 return -EINVAL;
1266 /*
1267 * Clear any trailing newline for the NAA WWN
1268 */
1269check_newline:
1270 if (i_port[strlen(i_port)-1] == '\n')
1271 i_port[strlen(i_port)-1] = '\0';
1272
1273 ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
1274 if (ret < 0)
1275 return ret;
1276
1277 return count;
1278}
1279
1280TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
1281
1282static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
1283 &tcm_vhost_tpg_nexus.attr,
1284 NULL,
1285};
1286
 1287static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
1288 struct config_group *group,
1289 const char *name)
1290{
1291 struct tcm_vhost_tport *tport = container_of(wwn,
1292 struct tcm_vhost_tport, tport_wwn);
1293
1294 struct tcm_vhost_tpg *tpg;
1295 unsigned long tpgt;
1296 int ret;
1297
1298 if (strstr(name, "tpgt_") != name)
1299 return ERR_PTR(-EINVAL);
1300 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
1301 return ERR_PTR(-EINVAL);
1302
1303 tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
1304 if (!tpg) {
1305 pr_err("Unable to allocate struct tcm_vhost_tpg");
1306 return ERR_PTR(-ENOMEM);
1307 }
1308 mutex_init(&tpg->tv_tpg_mutex);
1309 INIT_LIST_HEAD(&tpg->tv_tpg_list);
1310 tpg->tport = tport;
1311 tpg->tport_tpgt = tpgt;
1312
1313 ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
1314 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1315 if (ret < 0) {
1316 kfree(tpg);
1317 return NULL;
1318 }
1319 mutex_lock(&tcm_vhost_mutex);
1320 list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
1321 mutex_unlock(&tcm_vhost_mutex);
1322
1323 return &tpg->se_tpg;
1324}
1325
1326static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
1327{
1328 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1329 struct tcm_vhost_tpg, se_tpg);
1330
1331 mutex_lock(&tcm_vhost_mutex);
1332 list_del(&tpg->tv_tpg_list);
1333 mutex_unlock(&tcm_vhost_mutex);
1334 /*
 1335 * Release the virtual I_T Nexus for this vhost TPG
1336 */
1337 tcm_vhost_drop_nexus(tpg);
1338 /*
1339 * Deregister the se_tpg from TCM..
1340 */
1341 core_tpg_deregister(se_tpg);
1342 kfree(tpg);
1343}
1344
 1345static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
1346 struct config_group *group,
1347 const char *name)
1348{
1349 struct tcm_vhost_tport *tport;
1350 char *ptr;
1351 u64 wwpn = 0;
1352 int off = 0;
1353
1354 /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1355 return ERR_PTR(-EINVAL); */
1356
1357 tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
1358 if (!tport) {
1359 pr_err("Unable to allocate struct tcm_vhost_tport");
1360 return ERR_PTR(-ENOMEM);
1361 }
1362 tport->tport_wwpn = wwpn;
1363 /*
1364 * Determine the emulated Protocol Identifier and Target Port Name
1365 * based on the incoming configfs directory name.
1366 */
1367 ptr = strstr(name, "naa.");
1368 if (ptr) {
1369 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
1370 goto check_len;
1371 }
1372 ptr = strstr(name, "fc.");
1373 if (ptr) {
1374 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
1375 off = 3; /* Skip over "fc." */
1376 goto check_len;
1377 }
1378 ptr = strstr(name, "iqn.");
1379 if (ptr) {
1380 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
1381 goto check_len;
1382 }
1383
1384 pr_err("Unable to locate prefix for emulated Target Port:"
1385 " %s\n", name);
1386 kfree(tport);
1387 return ERR_PTR(-EINVAL);
1388
1389check_len:
1390 if (strlen(name) >= TCM_VHOST_NAMELEN) {
1391 pr_err("Emulated %s Address: %s, exceeds"
1392 " max: %d\n", name, tcm_vhost_dump_proto_id(tport),
1393 TCM_VHOST_NAMELEN);
1394 kfree(tport);
1395 return ERR_PTR(-EINVAL);
1396 }
1397 snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
1398
1399 pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
1400 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
1401
1402 return &tport->tport_wwn;
1403}
1404
1405static void tcm_vhost_drop_tport(struct se_wwn *wwn)
1406{
1407 struct tcm_vhost_tport *tport = container_of(wwn,
1408 struct tcm_vhost_tport, tport_wwn);
1409
1410 pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
1411 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
1412 tport->tport_name);
1413
1414 kfree(tport);
1415}
1416
1417static ssize_t tcm_vhost_wwn_show_attr_version(
1418 struct target_fabric_configfs *tf,
1419 char *page)
1420{
1421 return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
1422 "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
1423 utsname()->machine);
1424}
1425
1426TF_WWN_ATTR_RO(tcm_vhost, version);
1427
1428static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
1429 &tcm_vhost_wwn_version.attr,
1430 NULL,
1431};
1432
1433static struct target_core_fabric_ops tcm_vhost_ops = {
1434 .get_fabric_name = tcm_vhost_get_fabric_name,
1435 .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident,
1436 .tpg_get_wwn = tcm_vhost_get_fabric_wwn,
1437 .tpg_get_tag = tcm_vhost_get_tag,
1438 .tpg_get_default_depth = tcm_vhost_get_default_depth,
1439 .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id,
1440 .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len,
1441 .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id,
1442 .tpg_check_demo_mode = tcm_vhost_check_true,
1443 .tpg_check_demo_mode_cache = tcm_vhost_check_true,
1444 .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
1445 .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
1446 .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl,
1447 .tpg_release_fabric_acl = tcm_vhost_release_fabric_acl,
1448 .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index,
1449 .release_cmd = tcm_vhost_release_cmd,
1450 .shutdown_session = tcm_vhost_shutdown_session,
1451 .close_session = tcm_vhost_close_session,
1452 .sess_get_index = tcm_vhost_sess_get_index,
1453 .sess_get_initiator_sid = NULL,
1454 .write_pending = tcm_vhost_write_pending,
1455 .write_pending_status = tcm_vhost_write_pending_status,
1456 .set_default_node_attributes = tcm_vhost_set_default_node_attrs,
1457 .get_task_tag = tcm_vhost_get_task_tag,
1458 .get_cmd_state = tcm_vhost_get_cmd_state,
1459 .queue_data_in = tcm_vhost_queue_data_in,
1460 .queue_status = tcm_vhost_queue_status,
1461 .queue_tm_rsp = tcm_vhost_queue_tm_rsp,
1462 /*
1463 * Setup callers for generic logic in target_core_fabric_configfs.c
1464 */
1465 .fabric_make_wwn = tcm_vhost_make_tport,
1466 .fabric_drop_wwn = tcm_vhost_drop_tport,
1467 .fabric_make_tpg = tcm_vhost_make_tpg,
1468 .fabric_drop_tpg = tcm_vhost_drop_tpg,
1469 .fabric_post_link = tcm_vhost_port_link,
1470 .fabric_pre_unlink = tcm_vhost_port_unlink,
1471 .fabric_make_np = NULL,
1472 .fabric_drop_np = NULL,
1473 .fabric_make_nodeacl = tcm_vhost_make_nodeacl,
1474 .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl,
1475};
1476
1477static int tcm_vhost_register_configfs(void)
1478{
1479 struct target_fabric_configfs *fabric;
1480 int ret;
1481
1482 pr_debug("TCM_VHOST fabric module %s on %s/%s"
1483 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
1484 utsname()->machine);
1485 /*
1486 * Register the top level struct config_item_type with TCM core
1487 */
1488 fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
1489 if (IS_ERR(fabric)) {
1490 pr_err("target_fabric_configfs_init() failed\n");
1491 return PTR_ERR(fabric);
1492 }
1493 /*
1494 * Setup fabric->tf_ops from our local tcm_vhost_ops
1495 */
1496 fabric->tf_ops = tcm_vhost_ops;
1497 /*
1498 * Setup default attribute lists for various fabric->tf_cit_tmpl
1499 */
1500 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
1501 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
1502 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
1503 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
1504 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
1505 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1506 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1507 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1508 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1509 /*
1510 * Register the fabric for use within TCM
1511 */
1512 ret = target_fabric_configfs_register(fabric);
1513 if (ret < 0) {
1514 pr_err("target_fabric_configfs_register() failed"
1515 " for TCM_VHOST\n");
1516 return ret;
1517 }
1518 /*
1519 * Setup our local pointer to *fabric
1520 */
1521 tcm_vhost_fabric_configfs = fabric;
1522 pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
1523 return 0;
1524};
1525
1526static void tcm_vhost_deregister_configfs(void)
1527{
1528 if (!tcm_vhost_fabric_configfs)
1529 return;
1530
1531 target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
1532 tcm_vhost_fabric_configfs = NULL;
1533 pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
1534};
1535
1536static int __init tcm_vhost_init(void)
1537{
1538 int ret = -ENOMEM;
1539 /*
1540 * Use our own dedicated workqueue for submitting I/O into
1541 * target core to avoid contention within system_wq.
1542 */
1543 tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
1544 if (!tcm_vhost_workqueue)
1545 goto out;
1546
1547 ret = vhost_scsi_register();
1548 if (ret < 0)
1549 goto out_destroy_workqueue;
1550
1551 ret = tcm_vhost_register_configfs();
1552 if (ret < 0)
1553 goto out_vhost_scsi_deregister;
1554
1555 return 0;
1556
1557out_vhost_scsi_deregister:
1558 vhost_scsi_deregister();
1559out_destroy_workqueue:
1560 destroy_workqueue(tcm_vhost_workqueue);
1561out:
1562 return ret;
1563};
1564
1565static void tcm_vhost_exit(void)
1566{
1567 tcm_vhost_deregister_configfs();
1568 vhost_scsi_deregister();
1569 destroy_workqueue(tcm_vhost_workqueue);
1570};
1571
1572MODULE_DESCRIPTION("TCM_VHOST series fabric driver");
1573MODULE_LICENSE("GPL");
1574module_init(tcm_vhost_init);
1575module_exit(tcm_vhost_exit);