/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/*
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This allows backends that are too
 * complex for in-kernel support to be implemented as user processes.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap()ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */
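
/*
 * For orientation, a rough sketch of the userspace half of the protocol
 * described above (illustrative only, not part of this driver; error
 * handling elided, and ring_size assumed known from the UIO map0 sysfs
 * size attribute):
 *
 *	int fd = open("/dev/uio0", O_RDWR);
 *	struct tcmu_mailbox *mb = mmap(NULL, ring_size,
 *			PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
 *	for (;;) {
 *		uint32_t n;
 *		read(fd, &n, 4);	(blocks until uio_event_notify())
 *		while (mb->cmd_tail != mb->cmd_head) {
 *			struct tcmu_cmd_entry *ent = (void *)mb +
 *				mb->cmdr_off + mb->cmd_tail;
 *			(handle PAD or CMD, do the I/O, fill in ent->rsp)
 *			mb->cmd_tail = (mb->cmd_tail +
 *				tcmu_hdr_get_len(ent->hdr.len_op)) %
 *				mb->cmdr_size;
 *		}
 *		write(fd, &n, 4);	(kicks tcmu_irqcontrol() below)
 *	}
 */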


#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

#define CMDR_SIZE (16 * 4096)
#define DATA_SIZE (257 * 4096)

#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
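
/*
 * The vmalloc()ed region handed to userspace via mmap() is laid out as:
 *
 *   0          CMDR_OFF          CMDR_SIZE            TCMU_RING_SIZE
 *   +----------+-----------------+--------------------+
 *   | mailbox  | command ring    | data area          |
 *   +----------+-----------------+--------------------+
 *
 * i.e. the mailbox occupies the start of the CMDR_SIZE area and the data
 * area begins at offset CMDR_SIZE (see tcmu_configure_device()).
 */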
69
70 static struct device *tcmu_root_device;
71
72 struct tcmu_hba {
73 u32 host_id;
74 };
75
76 #define TCMU_CONFIG_LEN 256
77
78 struct tcmu_dev {
79 struct se_device se_dev;
80
81 char *name;
82 struct se_hba *hba;
83
84 #define TCMU_DEV_BIT_OPEN 0
85 #define TCMU_DEV_BIT_BROKEN 1
86 unsigned long flags;
87
88 struct uio_info uio_info;
89
90 struct tcmu_mailbox *mb_addr;
91 size_t dev_size;
92 u32 cmdr_size;
93 u32 cmdr_last_cleaned;
94 /* Offset of data ring from start of mb */
95 size_t data_off;
96 size_t data_size;
97 /* Ring head + tail values. */
98 /* Must add data_off and mb_addr to get the address */
99 size_t data_head;
100 size_t data_tail;
101
102 wait_queue_head_t wait_cmdr;
103 /* TODO should this be a mutex? */
104 spinlock_t cmdr_lock;
105
106 struct idr commands;
107 spinlock_t commands_lock;
108
109 struct timer_list timeout;
110
111 char dev_config[TCMU_CONFIG_LEN];
112 };
113
114 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
115
116 #define CMDR_OFF sizeof(struct tcmu_mailbox)
117
118 struct tcmu_cmd {
119 struct se_cmd *se_cmd;
120 struct tcmu_dev *tcmu_dev;
121
122 uint16_t cmd_id;
123
124 /* Can't use se_cmd->data_length when cleaning up expired cmds, because if
125 cmd has been completed then accessing se_cmd is off limits */
126 size_t data_length;
127
128 unsigned long deadline;
129
130 #define TCMU_CMD_BIT_EXPIRED 0
131 unsigned long flags;
132 };
133
134 static struct kmem_cache *tcmu_cmd_cache;
135
136 /* multicast group */
137 enum tcmu_multicast_groups {
138 TCMU_MCGRP_CONFIG,
139 };
140
141 static const struct genl_multicast_group tcmu_mcgrps[] = {
142 [TCMU_MCGRP_CONFIG] = { .name = "config", },
143 };
144
145 /* Our generic netlink family */
146 static struct genl_family tcmu_genl_family = {
147 .id = GENL_ID_GENERATE,
148 .hdrsize = 0,
149 .name = "TCM-USER",
150 .version = 1,
151 .maxattr = TCMU_ATTR_MAX,
152 .mcgrps = tcmu_mcgrps,
153 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
154 };
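
/*
 * A userspace daemon can watch for TCMU_CMD_ADDED_DEVICE /
 * TCMU_CMD_REMOVED_DEVICE notifications by joining this family's
 * "config" multicast group. A minimal sketch using libnl-genl-3
 * (illustrative only, not part of this driver):
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int grp = genl_ctrl_resolve_grp(sk, "TCM-USER", "config");
 *	nl_socket_add_membership(sk, grp);
 *	nl_recvmsgs_default(sk);	(then parse TCMU_ATTR_DEVICE/_MINOR)
 */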

static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	int cmd_id;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
	if (!tcmu_cmd)
		return NULL;

	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;
	tcmu_cmd->data_length = se_cmd->data_length;

	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
		tcmu_cmd->data_length += se_cmd->t_bidi_data_sg->length;
	}

	tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);

	idr_preload(GFP_KERNEL);
	spin_lock_irq(&udev->commands_lock);
	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
		USHRT_MAX, GFP_NOWAIT);
	spin_unlock_irq(&udev->commands_lock);
	idr_preload_end();

	if (cmd_id < 0) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}
	tcmu_cmd->cmd_id = cmd_id;

	return tcmu_cmd;
}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = (unsigned long) vaddr & ~PAGE_MASK;

	size = round_up(size+offset, PAGE_SIZE);
	vaddr -= offset;

	while (size) {
		flush_dcache_page(virt_to_page(vaddr));
		size -= PAGE_SIZE;
		vaddr += PAGE_SIZE; /* advance, or we'd flush the same page repeatedly */
	}
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
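
/*
 * Worked example of the helpers above, on a toy ring of size 8:
 * head = 6, tail = 2: spc_used() = 4, spc_free() = 8 - 4 - 1 = 3,
 * head_to_end() = 2. After the producer wraps, head = 1, tail = 6:
 * spc_used() = 8 + (1 - 6) = 3, spc_free() = 4. The reserved byte is
 * what lets head == tail unambiguously mean "empty" rather than "full".
 */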

static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
	struct scatterlist *data_sg, unsigned int data_nents,
	struct iovec **iov, int *iov_cnt, bool copy_data)
{
	int i;
	void *from, *to;
	size_t copy_bytes;
	struct scatterlist *sg;

	for_each_sg(data_sg, sg, data_nents, i) {
		copy_bytes = min_t(size_t, sg->length,
				 head_to_end(udev->data_head, udev->data_size));
		from = kmap_atomic(sg_page(sg)) + sg->offset;
		to = (void *) udev->mb_addr + udev->data_off + udev->data_head;

		if (copy_data) {
			memcpy(to, from, copy_bytes);
			tcmu_flush_dcache_range(to, copy_bytes);
		}

		/* Even iov_base is relative to mb_addr */
		(*iov)->iov_len = copy_bytes;
		(*iov)->iov_base = (void __user *) udev->data_off +
						udev->data_head;
		(*iov_cnt)++;
		(*iov)++;

		UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);

		/* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
		if (sg->length != copy_bytes) {
			void *from_skip = from + copy_bytes;

			copy_bytes = sg->length - copy_bytes;

			(*iov)->iov_len = copy_bytes;
			(*iov)->iov_base = (void __user *) udev->data_off +
							udev->data_head;

			if (copy_data) {
				to = (void *) udev->mb_addr +
					udev->data_off + udev->data_head;
				memcpy(to, from_skip, copy_bytes);
				tcmu_flush_dcache_range(to, copy_bytes);
			}

			(*iov_cnt)++;
			(*iov)++;

			UPDATE_HEAD(udev->data_head,
				copy_bytes, udev->data_size);
		}

		kunmap_atomic(from - sg->offset);
	}
}

static void gather_and_free_data_area(struct tcmu_dev *udev,
	struct scatterlist *data_sg, unsigned int data_nents)
{
	int i;
	void *from, *to;
	size_t copy_bytes;
	struct scatterlist *sg;

	/* It'd be easier to look at entry's iovec again, but UAM */
	for_each_sg(data_sg, sg, data_nents, i) {
		copy_bytes = min_t(size_t, sg->length,
				 head_to_end(udev->data_tail, udev->data_size));

		to = kmap_atomic(sg_page(sg)) + sg->offset;
		WARN_ON(sg->length + sg->offset > PAGE_SIZE);
		from = (void *) udev->mb_addr +
			udev->data_off + udev->data_tail;
		tcmu_flush_dcache_range(from, copy_bytes);
		memcpy(to, from, copy_bytes);

		UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);

		/* Uh oh, wrapped the data buffer for this sg's data */
		if (sg->length != copy_bytes) {
			void *to_skip = to + copy_bytes;

			from = (void *) udev->mb_addr +
				udev->data_off + udev->data_tail;
			WARN_ON(udev->data_tail);
			copy_bytes = sg->length - copy_bytes;
			tcmu_flush_dcache_range(from, copy_bytes);
			memcpy(to_skip, from, copy_bytes);

			UPDATE_HEAD(udev->data_tail,
				copy_bytes, udev->data_size);
		}
		kunmap_atomic(to - sg->offset);
	}
}

/*
 * We can't queue a command until we have space available on the cmd ring *and*
 * space available on the data ring.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	size_t space;
	u32 cmd_head;
	size_t cmd_needed;

	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a NOP plus
	 * original cmd - cmds are internally contiguous.
	 */
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
		       udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}

	space = spc_free(udev->data_head, udev->data_tail, udev->data_size);
	if (space < data_needed) {
		pr_debug("no data space: %zu %zu %zu\n", udev->data_head,
				udev->data_tail, udev->data_size);
		return false;
	}

	return true;
}
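
/*
 * Concretely: with cmdr_size = 100, cmd_head = 90 and cmd_size = 24,
 * head_to_end() is only 10, so is_ring_space_avail() requires
 * 24 + 10 = 34 free bytes: 10 for the PAD entry that fills the tail of
 * the ring (see tcmu_queue_cmd_ring() below) plus 24 for the command,
 * which then starts back at offset 0.
 */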

static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb;
	struct tcmu_cmd_entry *entry;
	struct iovec *iov;
	int iov_cnt;
	uint32_t cmd_head;
	uint64_t cdb_off;
	bool copy_to_data_area;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
		return -EINVAL;

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 *
	 * iovs = sgl_nents+1, for end-of-ring case, plus another 1
	 * b/c size == offsetof one-past-element.
	 */
	base_command_size = max(offsetof(struct tcmu_cmd_entry,
					 req.iov[se_cmd->t_bidi_data_nents +
						 se_cmd->t_data_nents + 2]),
				sizeof(struct tcmu_cmd_entry));
	command_size = base_command_size
		+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);

	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));

	spin_lock_irq(&udev->cmdr_lock);

	mb = udev->mb_addr;
	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	if ((command_size > (udev->cmdr_size / 2))
	    || tcmu_cmd->data_length > (udev->data_size - 1))
		pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
			"cmd/data ring buffers\n", command_size, tcmu_cmd->data_length,
			udev->cmdr_size, udev->data_size);

	while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) {
		int ret;
		DEFINE_WAIT(__wait);

		prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);

		pr_debug("sleeping for ring space\n");
		spin_unlock_irq(&udev->cmdr_lock);
		ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
		finish_wait(&udev->wait_cmdr, &__wait);
		if (!ret) {
			pr_warn("tcmu: command timed out\n");
			return -ETIMEDOUT;
		}

		spin_lock_irq(&udev->cmdr_lock);

		/* We dropped cmdr_lock, cmd_head is stale */
		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	}

	/* Insert a PAD if end-of-ring space is too small */
	if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

		entry = (void *) mb + CMDR_OFF + cmd_head;
		tcmu_flush_dcache_range(entry, sizeof(*entry));
		tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
		tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
		entry->hdr.cmd_id = 0; /* not used for PAD */
		entry->hdr.kflags = 0;
		entry->hdr.uflags = 0;

		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);

		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
		WARN_ON(cmd_head != 0);
	}

	entry = (void *) mb + CMDR_OFF + cmd_head;
	tcmu_flush_dcache_range(entry, sizeof(*entry));
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
	entry->hdr.cmd_id = tcmu_cmd->cmd_id;
	entry->hdr.kflags = 0;
	entry->hdr.uflags = 0;

	/*
	 * Fix up iovecs, and handle if allocation in data ring wrapped.
	 */
	iov = &entry->req.iov[0];
	iov_cnt = 0;
	copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
		|| se_cmd->se_cmd_flags & SCF_BIDI);
	alloc_and_scatter_data_area(udev, se_cmd->t_data_sg,
		se_cmd->t_data_nents, &iov, &iov_cnt, copy_to_data_area);
	entry->req.iov_cnt = iov_cnt;
	entry->req.iov_dif_cnt = 0;

	/* Handle BIDI commands */
	iov_cnt = 0;
	alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
		se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
	entry->req.iov_bidi_cnt = iov_cnt;

	/* All offsets relative to mb_addr, not start of entry! */
	cdb_off = CMDR_OFF + cmd_head + base_command_size;
	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
	entry->req.cdb_off = cdb_off;
	tcmu_flush_dcache_range(entry, sizeof(*entry));

	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	spin_unlock_irq(&udev->cmdr_lock);

	/* TODO: only if FLUSH and FUA? */
	uio_event_notify(&udev->uio_info);

	mod_timer(&udev->timeout,
		round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));

	return 0;
}
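
/*
 * Laid out in the ring, a queued command entry therefore looks like:
 *
 *   cmd_head -> [ hdr | req (cdb_off, iov[0..n]) | CDB, padded to
 *                 TCMU_OP_ALIGN_SIZE ]
 *
 * with cdb_off = CMDR_OFF + cmd_head + base_command_size, i.e. the CDB
 * is carried inside the entry's command_size right after the
 * variable-length iovec array, and every offset userspace sees is
 * relative to mb_addr.
 */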

static int tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	int ret;

	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
	if (!tcmu_cmd)
		return -ENOMEM;

	ret = tcmu_queue_cmd_ring(tcmu_cmd);
	if (ret < 0) {
		pr_err("TCMU: Could not queue command\n");
		spin_lock_irq(&udev->commands_lock);
		idr_remove(&udev->commands, tcmu_cmd->cmd_id);
		spin_unlock_irq(&udev->commands_lock);

		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
	}

	return ret;
}

static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		/* cmd has been completed already from timeout, just reclaim data
		   ring space */
		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
		return;
	}

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		transport_generic_request_failure(cmd->se_cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
		cmd->se_cmd = NULL;
		kmem_cache_free(tcmu_cmd_cache, cmd);
		return;
	}

	if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
			       se_cmd->scsi_sense_length);

		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
		/* Discard data_out buffer */
		UPDATE_HEAD(udev->data_tail,
			(size_t)se_cmd->t_data_sg->length, udev->data_size);

		/* Get Data-In buffer */
		gather_and_free_data_area(udev,
			se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		gather_and_free_data_area(udev,
			se_cmd->t_data_sg, se_cmd->t_data_nents);
	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
	} else if (se_cmd->data_direction != DMA_NONE) {
		pr_warn("TCMU: data direction was %d!\n",
			se_cmd->data_direction);
	}

	target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
	cmd->se_cmd = NULL;

	kmem_cache_free(tcmu_cmd_cache, cmd);
}

static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	LIST_HEAD(cpl_cmds);
	unsigned long flags;
	int handled = 0;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return 0;
	}

	spin_lock_irqsave(&udev->cmdr_lock, flags);

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
		struct tcmu_cmd *cmd;

		tcmu_flush_dcache_range(entry, sizeof(*entry));

		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				    tcmu_hdr_get_len(entry->hdr.len_op),
				    udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		spin_lock(&udev->commands_lock);
		cmd = idr_find(&udev->commands, entry->hdr.cmd_id);
		if (cmd)
			idr_remove(&udev->commands, cmd->cmd_id);
		spin_unlock(&udev->commands_lock);

		if (!cmd) {
			pr_err("cmd_id not found, ring is broken\n");
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
			break;
		}

		tcmu_handle_completion(cmd, entry);

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			    tcmu_hdr_get_len(entry->hdr.len_op),
			    udev->cmdr_size);

		handled++;
	}

	if (mb->cmd_tail == mb->cmd_head)
		del_timer(&udev->timeout); /* no more pending cmds */

	spin_unlock_irqrestore(&udev->cmdr_lock, flags);

	wake_up(&udev->wait_cmdr);

	return handled;
}

static int tcmu_check_expired_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;

	/* not yet past its deadline; leave it alone */
	if (!time_after(jiffies, cmd->deadline))
		return 0;

	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
	target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
	cmd->se_cmd = NULL;

	kmem_cache_free(tcmu_cmd_cache, cmd);

	return 0;
}

static void tcmu_device_timedout(unsigned long data)
{
	struct tcmu_dev *udev = (struct tcmu_dev *)data;
	unsigned long flags;
	int handled;

	handled = tcmu_handle_completions(udev);

	pr_warn("%d completions handled from timeout\n", handled);

	spin_lock_irqsave(&udev->commands_lock, flags);
	idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
	spin_unlock_irqrestore(&udev->commands_lock, flags);

	/*
	 * We don't need to wakeup threads on wait_cmdr since they have their
	 * own timeout.
	 */
}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct tcmu_hba *tcmu_hba;

	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
	if (!tcmu_hba)
		return -ENOMEM;

	tcmu_hba->host_id = host_id;
	hba->hba_ptr = tcmu_hba;

	return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
	kfree(hba->hba_ptr);
	hba->hba_ptr = NULL;
}

static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
	struct tcmu_dev *udev;

	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
	if (!udev)
		return NULL;

	udev->name = kstrdup(name, GFP_KERNEL);
	if (!udev->name) {
		kfree(udev);
		return NULL;
	}

	udev->hba = hba;

	init_waitqueue_head(&udev->wait_cmdr);
	spin_lock_init(&udev->cmdr_lock);

	idr_init(&udev->commands);
	spin_lock_init(&udev->commands_lock);

	setup_timer(&udev->timeout, tcmu_device_timedout,
		(unsigned long)udev);

	return &udev->se_dev;
}

static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
	struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);

	tcmu_handle_completions(tcmu_dev);

	return 0;
}

/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and this stuff must come along.
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}

static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;
	struct page *page;
	unsigned long offset;
	void *addr;

	int mi = tcmu_find_mem_index(vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	addr = (void *)(unsigned long)info->mem[mi].addr + offset;
	if (info->mem[mi].memtype == UIO_MEM_LOGICAL)
		page = virt_to_page(addr);
	else
		page = vmalloc_to_page(addr);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct tcmu_vm_ops = {
	.fault = tcmu_vma_fault,
};

static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &tcmu_vm_ops;

	vma->vm_private_data = udev;

	/* Ensure the mmap is exactly the right size */
	if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
		return -EINVAL;

	return 0;
}

static int tcmu_open(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	/* O_EXCL not supported for char devs, so fake it? */
	if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
		return -EBUSY;

	pr_debug("open\n");

	return 0;
}

static int tcmu_release(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);

	pr_debug("close\n");

	return 0;
}

static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor)
{
	struct sk_buff *skb;
	void *msg_header;
	int ret = -ENOMEM;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return ret;

	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
	if (!msg_header)
		goto free_skb;

	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor);
	if (ret < 0)
		goto free_skb;

	genlmsg_end(skb, msg_header);

	ret = genlmsg_multicast(&tcmu_genl_family, skb, 0,
				TCMU_MCGRP_CONFIG, GFP_KERNEL);

	/* We don't care if no one is listening */
	if (ret == -ESRCH)
		ret = 0;

	return ret;
free_skb:
	nlmsg_free(skb);
	return ret;
}

/* stringify helpers used for info->version below */
#define xstr(s) str(s)
#define str(s) #s

static int tcmu_configure_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	struct tcmu_hba *hba = udev->hba->hba_ptr;
	struct uio_info *info;
	struct tcmu_mailbox *mb;
	size_t size;
	size_t used;
	int ret = 0;
	char *str;

	info = &udev->uio_info;

	size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
			udev->dev_config);
	size += 1; /* for \0 */
	str = kmalloc(size, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);

	if (udev->dev_config[0])
		snprintf(str + used, size - used, "/%s", udev->dev_config);

	info->name = str;

	udev->mb_addr = vzalloc(TCMU_RING_SIZE);
	if (!udev->mb_addr) {
		ret = -ENOMEM;
		goto err_vzalloc;
	}

	/* mailbox fits in first part of CMDR space */
	udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
	udev->data_off = CMDR_SIZE;
	udev->data_size = TCMU_RING_SIZE - CMDR_SIZE;

	mb = udev->mb_addr;
	mb->version = TCMU_MAILBOX_VERSION;
	mb->cmdr_off = CMDR_OFF;
	mb->cmdr_size = udev->cmdr_size;

	WARN_ON(!PAGE_ALIGNED(udev->data_off));
	WARN_ON(udev->data_size % PAGE_SIZE);

	info->version = xstr(TCMU_MAILBOX_VERSION);

	info->mem[0].name = "tcm-user command & data buffer";
	info->mem[0].addr = (phys_addr_t) udev->mb_addr;
	info->mem[0].size = TCMU_RING_SIZE;
	info->mem[0].memtype = UIO_MEM_VIRTUAL;

	info->irqcontrol = tcmu_irqcontrol;
	info->irq = UIO_IRQ_CUSTOM;

	info->mmap = tcmu_mmap;
	info->open = tcmu_open;
	info->release = tcmu_release;

	ret = uio_register_device(tcmu_root_device, info);
	if (ret)
		goto err_register;

	/* Other attributes can be configured in userspace */
	dev->dev_attrib.hw_block_size = 512;
	dev->dev_attrib.hw_max_sectors = 128;
	dev->dev_attrib.hw_queue_depth = 128;

	ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
				 udev->uio_info.uio_dev->minor);
	if (ret)
		goto err_netlink;

	return 0;

err_netlink:
	uio_unregister_device(&udev->uio_info);
err_register:
	vfree(udev->mb_addr);
err_vzalloc:
	kfree(info->name);

	return ret;
}

static int tcmu_check_pending_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;
	return -EINVAL;
}

static void tcmu_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct tcmu_dev *udev = TCMU_DEV(dev);

	kfree(udev);
}

static void tcmu_free_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	int i;

	del_timer_sync(&udev->timeout);

	vfree(udev->mb_addr);

	/* Upper layer should drain all requests before calling this */
	spin_lock_irq(&udev->commands_lock);
	i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL);
	idr_destroy(&udev->commands);
	spin_unlock_irq(&udev->commands_lock);
	WARN_ON(i);

	/* Device was configured */
	if (udev->uio_info.uio_dev) {
		tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
				   udev->uio_info.uio_dev->minor);

		uio_unregister_device(&udev->uio_info);
		kfree(udev->uio_info.name);
	}

	/* name was kstrdup'ed in tcmu_alloc_device(); free it even if the
	   device was never configured, or it leaks */
	kfree(udev->name);

	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}

enum {
	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
};

static match_table_t tokens = {
	{Opt_dev_config, "dev_config=%s"},
	{Opt_dev_size, "dev_size=%u"},
	{Opt_hw_block_size, "hw_block_size=%u"},
	{Opt_err, NULL}
};
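
/*
 * These options arrive via the configfs "control" attribute. A typical
 * setup from userspace (paths are illustrative; HBA and device names
 * vary):
 *
 *	echo -n dev_config=foo/bar,dev_size=1073741824 > \
 *		/sys/kernel/config/target/core/user_0/test/control
 *	echo 1 > /sys/kernel/config/target/core/user_0/test/enable
 */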

static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	char *orig, *ptr, *opts, *arg_p;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_ul;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_dev_config:
			if (match_strlcpy(udev->dev_config, &args[0],
					  TCMU_CONFIG_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
			break;
		case Opt_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
			kfree(arg_p);
			if (ret < 0)
				pr_err("kstrtoul() failed for dev_size=\n");
			break;
		case Opt_hw_block_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_ul);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for hw_block_size=\n");
				break;
			}
			if (!tmp_ul) {
				pr_err("hw_block_size must be nonzero\n");
				break;
			}
			dev->dev_attrib.hw_block_size = tmp_ul;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "Config: %s ",
		     udev->dev_config[0] ? udev->dev_config : "NULL");
	bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);

	return bl;
}
1082
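/*
 * Note that tcmu_get_blocks() returns the last addressable LBA rather
 * than the block count, matching what READ CAPACITY reports: e.g.
 * dev_size = 1 GiB (1073741824) with 512-byte blocks gives
 * (1073741824 - 512) / 512 = 2097151.
 */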
static sector_t tcmu_get_blocks(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}

static sense_reason_t
tcmu_pass_op(struct se_cmd *se_cmd)
{
	int ret = tcmu_queue_cmd(se_cmd);

	if (ret != 0)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	else
		return TCM_NO_SENSE;
}

static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
	return passthrough_parse_cdb(cmd, tcmu_pass_op);
}

static const struct target_backend_ops tcmu_ops = {
	.name			= "user",
	.inquiry_prod		= "USER",
	.inquiry_rev		= TCMU_VERSION,
	.owner			= THIS_MODULE,
	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
	.attach_hba		= tcmu_attach_hba,
	.detach_hba		= tcmu_detach_hba,
	.alloc_device		= tcmu_alloc_device,
	.configure_device	= tcmu_configure_device,
	.free_device		= tcmu_free_device,
	.parse_cdb		= tcmu_parse_cdb,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= tcmu_get_blocks,
	.tb_dev_attrib_attrs	= passthrough_attrib_attrs,
};

static int __init tcmu_module_init(void)
{
	int ret;

	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);

	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
				sizeof(struct tcmu_cmd),
				__alignof__(struct tcmu_cmd),
				0, NULL);
	if (!tcmu_cmd_cache)
		return -ENOMEM;

	tcmu_root_device = root_device_register("tcm_user");
	if (IS_ERR(tcmu_root_device)) {
		ret = PTR_ERR(tcmu_root_device);
		goto out_free_cache;
	}

	ret = genl_register_family(&tcmu_genl_family);
	if (ret < 0) {
		goto out_unreg_device;
	}

	ret = transport_backend_register(&tcmu_ops);
	if (ret)
		goto out_unreg_genl;

	return 0;

out_unreg_genl:
	genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
	root_device_unregister(tcmu_root_device);
out_free_cache:
	kmem_cache_destroy(tcmu_cmd_cache);

	return ret;
}

static void __exit tcmu_module_exit(void)
{
	target_backend_unregister(&tcmu_ops);
	genl_unregister_family(&tcmu_genl_family);
	root_device_unregister(tcmu_root_device);
	kmem_cache_destroy(tcmu_cmd_cache);
}

MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);