vhost: move vhost-net zerocopy fields to net.c
drivers/vhost/tcm_vhost.c
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2012 RisingTide Systems LLC.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.c"
#include "vhost.h"
#include "tcm_vhost.h"

enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};

/*
 * VIRTIO_RING_F_EVENT_IDX seems broken. It is not clear where the bug
 * is, but disabling the feature helps.
 * TODO: debug and remove the workaround.
 */
enum {
	VHOST_SCSI_FEATURES = (VHOST_FEATURES & ~(1ULL << VIRTIO_RING_F_EVENT_IDX)) |
			      (1ULL << VIRTIO_SCSI_F_HOTPLUG)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_VQ	128
#define VHOST_SCSI_MAX_EVENT	128

struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};

struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	/* Track inflight reqs, protected by vq->mutex */
	struct vhost_scsi_inflight inflights[2];
	/* Indicate current inflight in use, protected by vq->mutex */
	int inflight_idx;
};

struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct tcm_vhost_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_vhost_fabric_configfs;

static struct workqueue_struct *tcm_vhost_workqueue;

/* Global mutex to protect the tcm_vhost TPG list for vhost IOCTL access */
static DEFINE_MUTEX(tcm_vhost_mutex);
static LIST_HEAD(tcm_vhost_list);

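/*
 * Return the number of pages an iovec spans, allowing for an iov_base
 * that is not page aligned.
 */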
static int iov_num_pages(struct iovec *iov)
{
	return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
	       ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}

void tcm_vhost_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

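/*
 * Each vq keeps two inflight counters; this flips inflight_idx so that
 * new requests are charged to a fresh counter, and optionally hands the
 * old counters back so vhost_scsi_flush() can wait for them to drain.
 */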
static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
				    struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* setup new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

static struct vhost_scsi_inflight *
tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, tcm_vhost_done_inflight);
}

static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *tcm_vhost_get_fabric_name(void)
{
	return "vhost";
}

static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_FCP:
		return fc_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_fabric_proto_ident(se_tpg);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_fabric_proto_ident(se_tpg);
}

static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
			format_code, buf);
}

static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
			format_code);
}

static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_FCP:
		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
			port_nexus_ptr);
}

static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
	struct se_portal_group *se_tpg)
{
	struct tcm_vhost_nacl *nacl;

	nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
	if (!nacl) {
		pr_err("Unable to allocate struct tcm_vhost_nacl\n");
		return NULL;
	}

	return &nacl->se_node_acl;
}

static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl)
{
	struct tcm_vhost_nacl *nacl = container_of(se_nacl,
			struct tcm_vhost_nacl, se_node_acl);
	kfree(nacl);
}

static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
{
	return;
}

static int tcm_vhost_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void tcm_vhost_close_session(struct se_session *se_sess)
{
	return;
}

static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
{
	return 0;
}

static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

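/*
 * Push a completed command onto the lockless completion list and kick
 * the completion work item; runs in target core callback context.
 */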
static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
{
	struct vhost_scsi *vs = tv_cmd->tvc_vhost;

	llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);

	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(tv_cmd);
	return 0;
}

static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(tv_cmd);
	return 0;
}

static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
{
	return 0;
}

static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

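/*
 * Allocate a hotplug event, bounded by VHOST_SCSI_MAX_EVENT pending
 * events; on overflow or allocation failure, vs_events_missed is set
 * instead (vs_events_nr is protected by the event vq mutex).
 */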
static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
	u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct tcm_vhost_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = event;
	evt->event.reason = reason;
	vs->vs_events_nr++;

	return evt;
}

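/*
 * Release a command: return it to target core, unpin any mapped data
 * pages, and drop its inflight reference.
 */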
static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
{
	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;

	/* TODO locking against target/backend threads? */
	transport_generic_free_cmd(se_cmd, 1);

	if (tv_cmd->tvc_sgl_count) {
		u32 i;
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));

		kfree(tv_cmd->tvc_sgl);
	}

	tcm_vhost_put_inflight(tv_cmd->inflight);

	kfree(tv_cmd);
}

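/*
 * Deliver a single virtio-scsi event to the guest's event virtqueue.
 * If no guest buffer is available the event is dropped and
 * vs_events_missed is set so the overflow can be reported later.
 */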
static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
	struct tcm_vhost_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vq->private_data) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
}

static void tcm_vhost_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct tcm_vhost_evt *evt;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	while (llnode) {
		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
		llnode = llist_next(llnode);
		tcm_vhost_do_evt_work(vs, evt);
		tcm_vhost_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
	struct virtio_scsi_cmd_resp v_rsp;
	struct tcm_vhost_cmd *tv_cmd;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	int ret, vq;

	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
	llnode = llist_del_all(&vs->vs_completion_list);
	while (llnode) {
		tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
				     tvc_completion_list);
		llnode = llist_next(llnode);
		se_cmd = &tv_cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);

		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = se_cmd->residual_count;
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = se_cmd->scsi_sense_length;
		memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
		       v_rsp.sense_len);
		ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
		if (likely(ret == 0)) {
			struct vhost_scsi_virtqueue *q;
			vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
			q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
			vq = q - vs->vqs;
			__set_bit(vq, signal);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_free_cmd(tv_cmd);
	}

	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
		< VHOST_SCSI_MAX_VQ)
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

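/*
 * Allocate and initialize a tcm_vhost_cmd for one virtio-scsi request,
 * taking an inflight reference so that a concurrent flush waits for
 * this command to finish.
 */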
static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
	struct vhost_virtqueue *vq,
	struct tcm_vhost_tpg *tv_tpg,
	struct virtio_scsi_cmd_req *v_req,
	u32 exp_data_len,
	int data_direction)
{
	struct tcm_vhost_cmd *tv_cmd;
	struct tcm_vhost_nexus *tv_nexus;

	tv_nexus = tv_tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct tcm_vhost_nexus\n");
		return ERR_PTR(-EIO);
	}

	tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
	if (!tv_cmd) {
		pr_err("Unable to allocate struct tcm_vhost_cmd\n");
		return ERR_PTR(-ENOMEM);
	}
	tv_cmd->tvc_tag = v_req->tag;
	tv_cmd->tvc_task_attr = v_req->task_attr;
	tv_cmd->tvc_exp_data_len = exp_data_len;
	tv_cmd->tvc_data_direction = data_direction;
	tv_cmd->tvc_nexus = tv_nexus;
	tv_cmd->inflight = tcm_vhost_get_inflight(vq);

	return tv_cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
	unsigned int sgl_count, struct iovec *iov, int write)
{
	unsigned int npages = 0, pages_nr, offset, nbytes;
	struct scatterlist *sg = sgl;
	void __user *ptr = iov->iov_base;
	size_t len = iov->iov_len;
	struct page **pages;
	int ret, i;

	pages_nr = iov_num_pages(iov);
	if (pages_nr > sgl_count)
		return -ENOBUFS;

	pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
	/* No pages were pinned */
	if (ret < 0)
		goto out;
	/* Fewer pages pinned than wanted */
	if (ret != pages_nr) {
		for (i = 0; i < ret; i++)
			put_page(pages[i]);
		ret = -EFAULT;
		goto out;
	}

	while (len > 0) {
		offset = (uintptr_t)ptr & ~PAGE_MASK;
		nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
		sg_set_page(sg, pages[npages], nbytes, offset);
		ptr += nbytes;
		len -= nbytes;
		sg++;
		npages++;
	}

out:
	kfree(pages);
	return ret;
}

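/*
 * Build a single scatterlist covering all data iovecs of a request by
 * pinning the backing user pages; on failure every page pinned so far
 * is released again.
 */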
static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
	struct iovec *iov, unsigned int niov, int write)
{
	int ret;
	unsigned int i;
	u32 sgl_count;
	struct scatterlist *sg;

	/*
	 * Find out how long sglist needs to be
	 */
	sgl_count = 0;
	for (i = 0; i < niov; i++)
		sgl_count += iov_num_pages(&iov[i]);

	/* TODO overflow checking */

	sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;
	pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
	       sg, sgl_count, !sg);
	sg_init_table(sg, sgl_count);

	tv_cmd->tvc_sgl = sg;
	tv_cmd->tvc_sgl_count = sgl_count;

	pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
	for (i = 0; i < niov; i++) {
		ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
		if (ret < 0) {
			for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
				put_page(sg_page(&tv_cmd->tvc_sgl[i]));
			kfree(tv_cmd->tvc_sgl);
			tv_cmd->tvc_sgl = NULL;
			tv_cmd->tvc_sgl_count = 0;
			return ret;
		}

		sg += ret;
		sgl_count -= ret;
	}
	return 0;
}

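/*
 * Workqueue handler that submits a fully parsed command into target
 * core; on failure a CHECK_CONDITION response is returned instead.
 */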
static void tcm_vhost_submission_work(struct work_struct *work)
{
	struct tcm_vhost_cmd *tv_cmd =
		container_of(work, struct tcm_vhost_cmd, work);
	struct tcm_vhost_nexus *tv_nexus;
	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
	struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
	int rc, sg_no_bidi = 0;

	if (tv_cmd->tvc_sgl_count) {
		sg_ptr = tv_cmd->tvc_sgl;
/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
#if 0
		if (se_cmd->se_cmd_flags & SCF_BIDI) {
			sg_bidi_ptr = NULL;
			sg_no_bidi = 0;
		}
#endif
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = tv_cmd->tvc_nexus;

	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
			tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
			tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
			tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
			0, sg_ptr, tv_cmd->tvc_sgl_count,
			sg_bidi_ptr, sg_no_bidi);
	if (rc < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
	}
}

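/*
 * Complete a request with VIRTIO_SCSI_S_BAD_TARGET when it cannot be
 * dispatched to a valid target.
 */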
static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
	struct vhost_virtqueue *vq, int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

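/*
 * Main I/O virtqueue handler: pop request descriptors, derive the data
 * direction from the out/in descriptor counts, map the data pages, and
 * queue each command for submission into target core.
 */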
static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
	struct vhost_virtqueue *vq)
{
	struct tcm_vhost_tpg **vs_tpg;
	struct virtio_scsi_cmd_req v_req;
	struct tcm_vhost_tpg *tv_tpg;
	struct tcm_vhost_cmd *tv_cmd;
	u32 exp_data_len, data_first, data_num, data_direction;
	unsigned out, in, i;
	int head, ret;
	u8 target;

	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 *
	 * TODO: Check that we are running from vhost_worker which acts
	 * as read-side critical section for vhost kind of RCU.
	 * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
	 */
	vs_tpg = rcu_dereference_check(vq->private_data, 1);
	if (!vs_tpg)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&vs->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
					ARRAY_SIZE(vq->iov), &out, &in,
					NULL, NULL);
		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
					head, out, in);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
				vhost_disable_notify(&vs->dev, vq);
				continue;
			}
			break;
		}

/* FIXME: BIDI operation */
		if (out == 1 && in == 1) {
			data_direction = DMA_NONE;
			data_first = 0;
			data_num = 0;
		} else if (out == 1 && in > 1) {
			data_direction = DMA_FROM_DEVICE;
			data_first = out + 1;
			data_num = in - 1;
		} else if (out > 1 && in == 1) {
			data_direction = DMA_TO_DEVICE;
			data_first = 1;
			data_num = out - 1;
		} else {
			vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
					out, in);
			break;
		}

		/*
		 * Check for a sane resp buffer so we can report errors to
		 * the guest.
		 */
		if (unlikely(vq->iov[out].iov_len !=
					sizeof(struct virtio_scsi_cmd_resp))) {
			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
				" bytes\n", vq->iov[out].iov_len);
			break;
		}

		if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
			vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
				" bytes\n", vq->iov[0].iov_len);
			break;
		}
		pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
			" len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
		ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
				sizeof(v_req));
		if (unlikely(ret)) {
			vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
			break;
		}

		/* Extract the tpgt */
		target = v_req.lun[1];
		tv_tpg = ACCESS_ONCE(vs_tpg[target]);

		/* Target does not exist, fail the request */
		if (unlikely(!tv_tpg)) {
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}

		exp_data_len = 0;
		for (i = 0; i < data_num; i++)
			exp_data_len += vq->iov[data_first + i].iov_len;

		tv_cmd = vhost_scsi_allocate_cmd(vq, tv_tpg, &v_req,
					exp_data_len, data_direction);
		if (IS_ERR(tv_cmd)) {
			vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
					PTR_ERR(tv_cmd));
			goto err_cmd;
		}
		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
			": %d\n", tv_cmd, exp_data_len, data_direction);

		tv_cmd->tvc_vhost = vs;
		tv_cmd->tvc_vq = vq;
		tv_cmd->tvc_resp = vq->iov[out].iov_base;

		/*
		 * Copy in the received CDB descriptor into tv_cmd->tvc_cdb
		 * that will be used by tcm_vhost_new_cmd_map() and down into
		 * target_setup_cmd_from_cdb()
		 */
		memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for tcm_vhost
		 */
		/* TODO what if cdb was too small for varlen cdb header? */
		if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
					TCM_VHOST_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(tv_cmd->tvc_cdb),
				TCM_VHOST_MAX_CDB_SIZE);
			goto err_free;
		}
		tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);

		if (data_direction != DMA_NONE) {
			ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
					&vq->iov[data_first], data_num,
					data_direction == DMA_TO_DEVICE);
			if (unlikely(ret)) {
				vq_err(vq, "Failed to map iov to sgl\n");
				goto err_free;
			}
		}

		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
		 */
		tv_cmd->tvc_vq_desc = head;
		/*
		 * Dispatch tv_cmd descriptor for cmwq execution in process
		 * context provided by tcm_vhost_workqueue.  This also ensures
		 * tv_cmd is executed on the same kworker CPU as this vhost
		 * thread to gain positive L2 cache locality effects.
		 */
		INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
		queue_work(tcm_vhost_workqueue, &tv_cmd->work);
	}

	mutex_unlock(&vq->mutex);
	return;

err_free:
	vhost_scsi_free_cmd(tv_cmd);
err_cmd:
	vhost_scsi_send_bad_target(vs, vq, head, out);
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	pr_debug("%s: The handling func for control queue.\n", __func__);
}

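/*
 * Queue a virtio-scsi event for delivery, encoding the (tpgt, lun)
 * pair into the event LUN field when a target is supplied; called
 * with the event vq mutex held.
 */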
static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
	struct se_lun *lun, u32 event, u32 reason)
{
	struct tcm_vhost_evt *evt;

	evt = tcm_vhost_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
				poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vq->private_data)
		goto out;

	if (vs->vs_events_missed)
		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
				poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}

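/*
 * Flush path: flip the per-vq inflight counters, flush all vhost work,
 * then wait until every request started before the flush has
 * completed.
 */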
static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
	vhost_poll_flush(&vs->vqs[index].vq.poll);
}

static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
	int i;

	/* Init new inflight and remember the old inflight */
	tcm_vhost_init_inflight(vs, old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);

	/* Flush both the vhost poll and vhost work */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		vhost_scsi_flush_vq(vs, i);
	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
	vhost_work_flush(&vs->dev, &vs->vs_event_work);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		wait_for_completion(&old_inflight[i]->comp);
}

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
 *
 * The lock nesting rule is:
 *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int vhost_scsi_set_endpoint(
	struct vhost_scsi *vs,
	struct vhost_scsi_target *t)
{
	struct tcm_vhost_tport *tv_tport;
	struct tcm_vhost_tpg *tv_tpg;
	struct tcm_vhost_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&tcm_vhost_mutex);
	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto out;
		}
	}

	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}
	if (vs->vs_tpg)
		memcpy(vs_tpg, vs->vs_tpg, len);

	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
		mutex_lock(&tv_tpg->tv_tpg_mutex);
		if (!tv_tpg->tpg_nexus) {
			mutex_unlock(&tv_tpg->tv_tpg_mutex);
			continue;
		}
		if (tv_tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tv_tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tv_tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
				kfree(vs_tpg);
				mutex_unlock(&tv_tpg->tv_tpg_mutex);
				ret = -EEXIST;
				goto out;
			}
			tv_tpg->tv_tpg_vhost_count++;
			tv_tpg->vhost_scsi = vs;
			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
			smp_mb__after_atomic_inc();
			match = true;
		}
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
	}

	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			/* Flushing the vhost_work acts as synchronize_rcu */
			mutex_lock(&vq->mutex);
			rcu_assign_pointer(vq->private_data, vs_tpg);
			vhost_init_used(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = vs_tpg;

out:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return ret;
}

static int vhost_scsi_clear_endpoint(
	struct vhost_scsi *vs,
	struct vhost_scsi_target *t)
{
	struct tcm_vhost_tport *tv_tport;
	struct tcm_vhost_tpg *tv_tpg;
	struct vhost_virtqueue *vq;
	bool match = false;
	int index, ret, i;
	u8 target;

	mutex_lock(&tcm_vhost_mutex);
	mutex_lock(&vs->dev.mutex);
	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto err_dev;
		}
	}

	if (!vs->vs_tpg) {
		ret = 0;
		goto err_dev;
	}

	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tv_tpg = vs->vs_tpg[target];
		if (!tv_tpg)
			continue;

		mutex_lock(&tv_tpg->tv_tpg_mutex);
		tv_tport = tv_tpg->tport;
		if (!tv_tport) {
			ret = -ENODEV;
			goto err_tpg;
		}

		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
				tv_tport->tport_name, tv_tpg->tport_tpgt,
				t->vhost_wwpn, t->vhost_tpgt);
			ret = -EINVAL;
			goto err_tpg;
		}
		tv_tpg->tv_tpg_vhost_count--;
		tv_tpg->vhost_scsi = NULL;
		vs->vs_tpg[target] = NULL;
		match = true;
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
	}
	if (match) {
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			/* Flushing the vhost_work acts as synchronize_rcu */
			mutex_lock(&vq->mutex);
			rcu_assign_pointer(vq->private_data, NULL);
			mutex_unlock(&vq->mutex);
		}
	}
	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = NULL;
	WARN_ON(vs->vs_events_nr);
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return 0;

err_tpg:
	mutex_unlock(&tv_tpg->tv_tpg_mutex);
err_dev:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return ret;
}

static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}
	vs->dev.acked_features = features;
	smp_wmb();
	vhost_scsi_flush(vs);
	mutex_unlock(&vs->dev.mutex);
	return 0;
}

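/*
 * Character device open: allocate the vhost_scsi instance, wire up the
 * control/event/I/O virtqueue kick handlers, and initialize the vhost
 * device and inflight tracking.
 */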
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
	struct vhost_scsi *s;
	struct vhost_virtqueue **vqs;
	int r, i;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(s);
		return -ENOMEM;
	}

	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);

	s->vs_events_nr = 0;
	s->vs_events_missed = false;

	vqs[VHOST_SCSI_VQ_CTL] = &s->vqs[VHOST_SCSI_VQ_CTL].vq;
	vqs[VHOST_SCSI_VQ_EVT] = &s->vqs[VHOST_SCSI_VQ_EVT].vq;
	s->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
	s->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
		vqs[i] = &s->vqs[i].vq;
		s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
	}
	r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);

	tcm_vhost_init_inflight(s, NULL);

	if (r < 0) {
		kfree(vqs);
		kfree(s);
		return r;
	}

	f->private_data = s;
	return 0;
}

static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *s = f->private_data;
	struct vhost_scsi_target t;

	mutex_lock(&s->dev.mutex);
	memcpy(t.vhost_wwpn, s->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&s->dev.mutex);
	vhost_scsi_clear_endpoint(s, &t);
	vhost_dev_stop(&s->dev);
	vhost_dev_cleanup(&s->dev, false);
	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
	vhost_scsi_flush(s);
	kfree(s->dev.vqs);
	kfree(s);
	return 0;
}

static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
				unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u32 events_missed;
	u64 features;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof abi_version))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
				unsigned long arg)
{
	return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_scsi_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_scsi_release,
	.unlocked_ioctl = vhost_scsi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_scsi_compat_ioctl,
#endif
	.open           = vhost_scsi_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-scsi",
	&vhost_scsi_fops,
};

static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

static int vhost_scsi_deregister(void)
{
	return misc_deregister(&vhost_scsi_misc);
}

static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

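/*
 * Emit a VIRTIO_SCSI_T_TRANSPORT_RESET event for LUN hotplug or
 * hot-unplug, but only if the guest negotiated VIRTIO_SCSI_F_HOTPLUG.
 */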
static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
	struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	mutex_lock(&vs->dev.mutex);
	if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
		mutex_unlock(&vs->dev.mutex);
		return;
	}

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	mutex_lock(&vq->mutex);
	tcm_vhost_send_evt(vs, tpg, lun,
			VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);
}

static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	tcm_vhost_do_plug(tpg, lun, true);
}

static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	tcm_vhost_do_plug(tpg, lun, false);
}

static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);

	mutex_lock(&tv_tpg->tv_tpg_mutex);
	tv_tpg->tv_tpg_port_count++;
	mutex_unlock(&tv_tpg->tv_tpg_mutex);

	tcm_vhost_hotplug(tv_tpg, lun);

	mutex_unlock(&tcm_vhost_mutex);

	return 0;
}

static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);

	mutex_lock(&tv_tpg->tv_tpg_mutex);
	tv_tpg->tv_tpg_port_count--;
	mutex_unlock(&tv_tpg->tv_tpg_mutex);

	tcm_vhost_hotunplug(tv_tpg, lun);

	mutex_unlock(&tcm_vhost_mutex);
}

static struct se_node_acl *tcm_vhost_make_nodeacl(
	struct se_portal_group *se_tpg,
	struct config_group *group,
	const char *name)
{
	struct se_node_acl *se_nacl, *se_nacl_new;
	struct tcm_vhost_nacl *nacl;
	u64 wwpn = 0;
	u32 nexus_depth;

	/* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */
	se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
	if (!se_nacl_new)
		return ERR_PTR(-ENOMEM);

	nexus_depth = 1;
	/*
	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
	 * when converting a NodeACL from demo mode -> explicit
	 */
	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
				name, nexus_depth);
	if (IS_ERR(se_nacl)) {
		tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
		return se_nacl;
	}
	/*
	 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
	 */
	nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
	nacl->iport_wwpn = wwpn;

	return se_nacl;
}

static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
{
	struct tcm_vhost_nacl *nacl = container_of(se_acl,
				struct tcm_vhost_nacl, se_node_acl);
	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
	kfree(nacl);
}

static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
	const char *name)
{
	struct se_portal_group *se_tpg;
	struct tcm_vhost_nexus *tv_nexus;

	mutex_lock(&tv_tpg->tv_tpg_mutex);
	if (tv_tpg->tpg_nexus) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		pr_debug("tv_tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tv_tpg->se_tpg;

	tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct tcm_vhost_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Initialize the struct se_session pointer
	 */
	tv_nexus->tvn_se_sess = transport_init_session();
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the tcm_vhost struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
				se_tpg, (unsigned char *)name);
	if (!tv_nexus->tvn_se_sess->se_node_acl) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		pr_debug("core_tpg_check_initiator_node_acl() failed"
				" for %s\n", name);
		transport_free_session(tv_nexus->tvn_se_sess);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	/*
	 * Now register the TCM vhost virtual I_T Nexus as active with the
	 * call to __transport_register_session()
	 */
	__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
			tv_nexus->tvn_se_sess, tv_nexus);
	tv_tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tv_tpg->tv_tpg_mutex);
	return 0;
}

static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_vhost_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG port count: %d\n",
			tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG vhost count: %d\n",
			tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	transport_deregister_session(tv_nexus->tvn_se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}

static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
	char *page)
{
	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tv_tpg->tv_tpg_mutex);
	tv_nexus = tv_tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tv_tpg->tv_tpg_mutex);

	return ret;
}

static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
	unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed.
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_vhost_drop_nexus(tv_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_vhost_make_tport(), and call
	 * tcm_vhost_make_nexus().
	 */
	if (strlen(page) >= TCM_VHOST_NAMELEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
				" max: %d\n", page, TCM_VHOST_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
	&tcm_vhost_tpg_nexus.attr,
	NULL,
};

static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_vhost_tport *tport = container_of(wwn,
			struct tcm_vhost_tport, tport_wwn);

	struct tcm_vhost_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct tcm_vhost_tpg");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	mutex_lock(&tcm_vhost_mutex);
	list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
	mutex_unlock(&tcm_vhost_mutex);

	return &tpg->se_tpg;
}

static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&tcm_vhost_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	tcm_vhost_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM..
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}

static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_vhost_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct tcm_vhost_tport");
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
			" %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= TCM_VHOST_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
			" max: %d\n", name, tcm_vhost_dump_proto_id(tport),
			TCM_VHOST_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}

static void tcm_vhost_drop_tport(struct se_wwn *wwn)
{
	struct tcm_vhost_tport *tport = container_of(wwn,
				struct tcm_vhost_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
		tport->tport_name);

	kfree(tport);
}

static ssize_t tcm_vhost_wwn_show_attr_version(
	struct target_fabric_configfs *tf,
	char *page)
{
	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
		utsname()->machine);
}

TF_WWN_ATTR_RO(tcm_vhost, version);

static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
	&tcm_vhost_wwn_version.attr,
	NULL,
};

static struct target_core_fabric_ops tcm_vhost_ops = {
	.get_fabric_name		= tcm_vhost_get_fabric_name,
	.get_fabric_proto_ident		= tcm_vhost_get_fabric_proto_ident,
	.tpg_get_wwn			= tcm_vhost_get_fabric_wwn,
	.tpg_get_tag			= tcm_vhost_get_tag,
	.tpg_get_default_depth		= tcm_vhost_get_default_depth,
	.tpg_get_pr_transport_id	= tcm_vhost_get_pr_transport_id,
	.tpg_get_pr_transport_id_len	= tcm_vhost_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id	= tcm_vhost_parse_pr_out_transport_id,
	.tpg_check_demo_mode		= tcm_vhost_check_true,
	.tpg_check_demo_mode_cache	= tcm_vhost_check_true,
	.tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
	.tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
	.tpg_alloc_fabric_acl		= tcm_vhost_alloc_fabric_acl,
	.tpg_release_fabric_acl		= tcm_vhost_release_fabric_acl,
	.tpg_get_inst_index		= tcm_vhost_tpg_get_inst_index,
	.release_cmd			= tcm_vhost_release_cmd,
	.shutdown_session		= tcm_vhost_shutdown_session,
	.close_session			= tcm_vhost_close_session,
	.sess_get_index			= tcm_vhost_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= tcm_vhost_write_pending,
	.write_pending_status		= tcm_vhost_write_pending_status,
	.set_default_node_attributes	= tcm_vhost_set_default_node_attrs,
	.get_task_tag			= tcm_vhost_get_task_tag,
	.get_cmd_state			= tcm_vhost_get_cmd_state,
	.queue_data_in			= tcm_vhost_queue_data_in,
	.queue_status			= tcm_vhost_queue_status,
	.queue_tm_rsp			= tcm_vhost_queue_tm_rsp,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= tcm_vhost_make_tport,
	.fabric_drop_wwn		= tcm_vhost_drop_tport,
	.fabric_make_tpg		= tcm_vhost_make_tpg,
	.fabric_drop_tpg		= tcm_vhost_drop_tpg,
	.fabric_post_link		= tcm_vhost_port_link,
	.fabric_pre_unlink		= tcm_vhost_port_unlink,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_make_nodeacl		= tcm_vhost_make_nodeacl,
	.fabric_drop_nodeacl		= tcm_vhost_drop_nodeacl,
};

static int tcm_vhost_register_configfs(void)
{
	struct target_fabric_configfs *fabric;
	int ret;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
		utsname()->machine);
	/*
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
	if (IS_ERR(fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		return PTR_ERR(fabric);
	}
	/*
	 * Setup fabric->tf_ops from our local tcm_vhost_ops
	 */
	fabric->tf_ops = tcm_vhost_ops;
	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
	/*
	 * Register the fabric for use within TCM
	 */
	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed"
				" for TCM_VHOST\n");
		return ret;
	}
	/*
	 * Setup our local pointer to *fabric
	 */
	tcm_vhost_fabric_configfs = fabric;
	pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
	return 0;
}

static void tcm_vhost_deregister_configfs(void)
{
	if (!tcm_vhost_fabric_configfs)
		return;

	target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
	tcm_vhost_fabric_configfs = NULL;
	pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
}

static int __init tcm_vhost_init(void)
{
	int ret = -ENOMEM;
	/*
	 * Use our own dedicated workqueue for submitting I/O into
	 * target core to avoid contention within system_wq.
	 */
	tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
	if (!tcm_vhost_workqueue)
		goto out;

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out_destroy_workqueue;

	ret = tcm_vhost_register_configfs();
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out_destroy_workqueue:
	destroy_workqueue(tcm_vhost_workqueue);
out:
	return ret;
}

static void tcm_vhost_exit(void)
{
	tcm_vhost_deregister_configfs();
	vhost_scsi_deregister();
	destroy_workqueue(tcm_vhost_workqueue);
}

MODULE_DESCRIPTION("TCM_VHOST series fabric driver");
MODULE_LICENSE("GPL");
module_init(tcm_vhost_init);
module_exit(tcm_vhost_exit);