target: Core cleanups from AGrover (round 1)
[deliverable/linux.git] drivers/target/target_core_rd.c
/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_rd.h"

static struct se_subsystem_api rd_dr_template;
static struct se_subsystem_api rd_mcp_template;

/* #define DEBUG_RAMDISK_MCP */
/* #define DEBUG_RAMDISK_DR */

/* rd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 * Allocate a struct rd_host to track the ramdisk devices behind this
 * HBA, and hang it off of hba->hba_ptr.
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
	printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
		" MaxSectors: %u\n", hba->hba_id,
		rd_host->rd_host_id, RD_MAX_SECTORS);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

/* rd_release_device_space():
 *
 * Free every page referenced by the device's scatterlist tables, then
 * the scatterlists and the table array themselves.
 */
static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 i, j, page_count = 0, sg_per_table;
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	sg_table = rd_dev->sg_table_array;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}

		kfree(sg);
	}

	printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	kfree(sg_table);
	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}

/* rd_build_device_space():
 *
 * Allocate the backing pages for the ramdisk, split across as many
 * struct rd_dev_sg_table entries as RD_MAX_ALLOCATION_SIZE requires.
 */
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (!rd_dev->rd_page_count) {
		printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
			rd_dev->rd_page_count);
		return -EINVAL;
	}
	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		printk(KERN_ERR "Unable to allocate memory for Ramdisk"
			" scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
				GFP_KERNEL);
		if (!sg) {
			printk(KERN_ERR "Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table);

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				printk(KERN_ERR "Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);

	return 0;
}
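
/*
 * Worked sizing example (illustrative only; the real numbers depend on
 * RD_MAX_ALLOCATION_SIZE in target_core_rd.h and on the arch's
 * sizeof(struct scatterlist)): with 4K pages, a 32-byte scatterlist
 * entry and an RD_MAX_ALLOCATION_SIZE of 65536, max_sg_per_table is
 * 2048, so rd_pages=10000 is carved into (10000 / 2048) + 1 = 5 tables,
 * the first four holding 2048 pages each and the fifth the remaining
 * 1808.
 */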

static void *rd_allocate_virtdevice(
	struct se_hba *hba,
	const char *name,
	int rd_direct)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;
	rd_dev->rd_direct = rd_direct;

	return rd_dev;
}

static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	return rd_allocate_virtdevice(hba, name, 1);
}

static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	return rd_allocate_virtdevice(hba, name, 0);
}

/* rd_create_virtdevice():
 *
 * Allocate the ramdisk page space and register the new device with the
 * target core.  Shared by the DIRECT and MEMCPY variants below.
 */
static struct se_device *rd_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p,
	int rd_direct)
{
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct rd_dev *rd_dev = p;
	struct rd_host *rd_host = hba->hba_ptr;
	int dev_flags = 0, ret;
	char prod[16], rev[4];

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
	snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
						RD_MCP_VERSION);

	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
	dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
	dev_limits.limits.max_sectors = RD_MAX_SECTORS;
	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba,
			(rd_dev->rd_direct) ? &rd_dr_template :
			&rd_mcp_template, se_dev, dev_flags, rd_dev,
			&dev_limits, prod, rev);
	if (!dev) {
		/*
		 * Without this, ret would still be 0 here and the fail
		 * path would return ERR_PTR(0), i.e. NULL.
		 */
		ret = -ENOMEM;
		goto fail;
	}

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
	rd_dev->rd_queue_depth = dev->queue_depth;

	printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return dev;

fail:
	rd_release_device_space(rd_dev);
	return ERR_PTR(ret);
}

static struct se_device *rd_DIRECT_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	return rd_create_virtdevice(hba, se_dev, p, 1);
}

static struct se_device *rd_MEMCPY_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	return rd_create_virtdevice(hba, se_dev, p, 0);
}

/* rd_free_device(): (Part of se_subsystem_api_t template)
 *
 * Release the ramdisk page space and the struct rd_dev itself.
 */
static void rd_free_device(void *p)
{
	struct rd_dev *rd_dev = p;

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}

static inline struct rd_request *RD_REQ(struct se_task *task)
{
	return container_of(task, struct rd_request, rd_task);
}

static struct se_task *
rd_alloc_task(struct se_cmd *cmd)
{
	struct rd_request *rd_req;

	rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
	if (!rd_req) {
		printk(KERN_ERR "Unable to allocate struct rd_request\n");
		return NULL;
	}
	rd_req->rd_dev = cmd->se_lun->lun_se_dev->dev_ptr;

	return &rd_req->rd_task;
}

/* rd_get_sg_table():
 *
 * Return the scatterlist table whose inclusive
 * [page_start_offset, page_end_offset] range covers the given page.
 */
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	u32 i;
	struct rd_dev_sg_table *sg_table;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
		page);

	return NULL;
}
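
/*
 * Note that the lookup above is a linear scan, so the per-I/O mapping
 * cost grows with sg_table_count; since the ranges are inclusive and
 * non-overlapping, exactly one table matches any in-range page number.
 */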

/* rd_MEMCPY_read():
 *
 * Copy from the ramdisk source scatterlists (sg_s) into the task's
 * destination scatterlist (sg_d).  Each pass through the loop copies
 * whichever of the two current entries has less room left, then
 * advances that side; crossing a ramdisk page table boundary fetches
 * the next struct rd_dev_sg_table.
 */
static int rd_MEMCPY_read(struct rd_request *req)
{
	struct se_task *task = &req->rd_task;
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct scatterlist *sg_d, *sg_s;
	void *dst, *src;
	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
	u32 length, page_end = 0, table_sg_end;
	u32 rd_offset = req->rd_offset;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!table)
		return -EINVAL;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_d = task->task_sg;
	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_MCP
	printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
		" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
		req->rd_page, req->rd_offset);
#endif
	src_offset = rd_offset;

	while (req->rd_size) {
		if ((sg_d[i].length - dst_offset) <
		    (sg_s[j].length - src_offset)) {
			length = (sg_d[i].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
				" offset: %u sg_s[%d].length: %u\n", i,
				&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
				sg_s[j].length);
			printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
				" src_offset: %u\n", length, dst_offset,
				src_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			dst = sg_virt(&sg_d[i++]) + dst_offset;
			if (!dst)
				BUG();

			src = sg_virt(&sg_s[j]) + src_offset;
			if (!src)
				BUG();

			dst_offset = 0;
			src_offset = length;
			page_end = 0;
		} else {
			length = (sg_s[j].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
				" offset: %u sg_s[%d].length: %u\n", i,
				&sg_d[i], sg_d[i].length, sg_d[i].offset,
				j, sg_s[j].length);
			printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
				" src_offset: %u\n", length, dst_offset,
				src_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			dst = sg_virt(&sg_d[i]) + dst_offset;
			if (!dst)
				BUG();

			if (sg_d[i].length == length) {
				i++;
				dst_offset = 0;
			} else
				dst_offset = length;

			src = sg_virt(&sg_s[j++]) + src_offset;
			if (!src)
				BUG();

			src_offset = 0;
			page_end = 1;
		}

		memcpy(dst, src, length);

#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
			" i: %u, j: %u\n", req->rd_page,
			(req->rd_size - length), length, i, j);
#endif
		req->rd_size -= length;
		if (!req->rd_size)
			return 0;

		if (!page_end)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!table)
			return -EINVAL;

		sg_s = &table->sg_table[j = 0];
	}

	return 0;
}

/* rd_MEMCPY_write():
 *
 * Mirror image of rd_MEMCPY_read(): copy from the task's source
 * scatterlist (sg_s) into the ramdisk's destination scatterlists
 * (sg_d), advancing whichever side fills up first.
 */
static int rd_MEMCPY_write(struct rd_request *req)
{
	struct se_task *task = &req->rd_task;
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct scatterlist *sg_d, *sg_s;
	void *dst, *src;
	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
	u32 length, page_end = 0, table_sg_end;
	u32 rd_offset = req->rd_offset;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!table)
		return -EINVAL;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
	sg_s = task->task_sg;
#ifdef DEBUG_RAMDISK_MCP
	printk(KERN_INFO "RD[%u] Write LBA: %llu, Size: %u, Page: %u,"
		" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
		req->rd_page, req->rd_offset);
#endif
	dst_offset = rd_offset;

	while (req->rd_size) {
		if ((sg_s[i].length - src_offset) <
		    (sg_d[j].length - dst_offset)) {
			length = (sg_s[i].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
				" offset: %d sg_d[%d].length: %u\n", i,
				&sg_s[i], sg_s[i].length, sg_s[i].offset,
				j, sg_d[j].length);
			printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
				" dst_offset: %u\n", length, src_offset,
				dst_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			src = sg_virt(&sg_s[i++]) + src_offset;
			if (!src)
				BUG();

			dst = sg_virt(&sg_d[j]) + dst_offset;
			if (!dst)
				BUG();

			src_offset = 0;
			dst_offset = length;
			page_end = 0;
		} else {
			length = (sg_d[j].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
				" offset: %d sg_d[%d].length: %u\n", i,
				&sg_s[i], sg_s[i].length, sg_s[i].offset,
				j, sg_d[j].length);
			printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
				" dst_offset: %u\n", length, src_offset,
				dst_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			src = sg_virt(&sg_s[i]) + src_offset;
			if (!src)
				BUG();

			if (sg_s[i].length == length) {
				i++;
				src_offset = 0;
			} else
				src_offset = length;

			dst = sg_virt(&sg_d[j++]) + dst_offset;
			if (!dst)
				BUG();

			dst_offset = 0;
			page_end = 1;
		}

		memcpy(dst, src, length);

#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
			" i: %u, j: %u\n", req->rd_page,
			(req->rd_size - length), length, i, j);
#endif
		req->rd_size -= length;
		if (!req->rd_size)
			return 0;

		if (!page_end)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!table)
			return -EINVAL;

		sg_d = &table->sg_table[j = 0];
	}

	return 0;
}

/* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
 *
 * Convert the task's starting LBA into a ramdisk page number plus a
 * byte offset within that page, then hand off to the read/write copy
 * loop above.
 */
static int rd_MEMCPY_do_task(struct se_task *task)
{
	struct se_device *dev = task->se_dev;
	struct rd_request *req = RD_REQ(task);
	unsigned long long lba;
	int ret;

	req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
	lba = task->task_lba;
	req->rd_offset = (do_div(lba,
			 (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
			  dev->se_sub_dev->se_dev_attrib.block_size;
	req->rd_size = task->task_size;

	if (task->task_data_direction == DMA_FROM_DEVICE)
		ret = rd_MEMCPY_read(req);
	else
		ret = rd_MEMCPY_write(req);

	if (ret != 0)
		return ret;

	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
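
/*
 * Example of the LBA -> page/offset math above (illustrative, assuming
 * a 512 byte block_size and 4K PAGE_SIZE, i.e. 8 blocks per page):
 * task_lba = 10 gives rd_page = (10 * 512) / 4096 = 1 and
 * rd_offset = (10 % 8) * 512 = 1024, so the copy starts 1024 bytes
 * into the second ramdisk page.
 */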

/* rd_DIRECT_with_offset():
 *
 * Map the task directly onto the ramdisk's own pages (no data copy),
 * building a struct se_mem list; handles a starting byte offset into
 * the first page.
 */
static int rd_DIRECT_with_offset(
	struct se_task *task,
	struct list_head *se_mem_list,
	u32 *se_mem_cnt,
	u32 *task_offset)
{
	struct rd_request *req = RD_REQ(task);
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct se_mem *se_mem;
	struct scatterlist *sg_s;
	u32 j = 0, set_offset = 1;
	u32 get_next_table = 0, offset_length, table_sg_end;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!table)
		return -EINVAL;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
		(task->task_data_direction == DMA_TO_DEVICE) ?
			"Write" : "Read",
		task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
#endif
	while (req->rd_size) {
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!se_mem) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&se_mem->se_list);

		if (set_offset) {
			offset_length = sg_s[j].length - req->rd_offset;
			if (offset_length > req->rd_size)
				offset_length = req->rd_size;

			se_mem->se_page = sg_page(&sg_s[j++]);
			se_mem->se_off = req->rd_offset;
			se_mem->se_len = offset_length;

			set_offset = 0;
			get_next_table = (j > table_sg_end);
			goto check_eot;
		}

		offset_length = (req->rd_size < req->rd_offset) ?
			req->rd_size : req->rd_offset;

		se_mem->se_page = sg_page(&sg_s[j]);
		se_mem->se_len = offset_length;

		set_offset = 1;

check_eot:
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
			" se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
			req->rd_page, req->rd_size, offset_length, j, se_mem,
			se_mem->se_page, se_mem->se_off, se_mem->se_len);
#endif
		list_add_tail(&se_mem->se_list, se_mem_list);
		(*se_mem_cnt)++;

		req->rd_size -= offset_length;
		if (!req->rd_size)
			goto out;

		if (!set_offset && !get_next_table)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!table)
			return -EINVAL;

		sg_s = &table->sg_table[j = 0];
	}

out:
	task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
		*se_mem_cnt);
#endif
	return 0;
}

/* rd_DIRECT_without_offset():
 *
 * Page-aligned variant of rd_DIRECT_with_offset(): map whole ramdisk
 * pages into the struct se_mem list until rd_size is consumed.
 */
static int rd_DIRECT_without_offset(
	struct se_task *task,
	struct list_head *se_mem_list,
	u32 *se_mem_cnt,
	u32 *task_offset)
{
	struct rd_request *req = RD_REQ(task);
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct se_mem *se_mem;
	struct scatterlist *sg_s;
	u32 length, j = 0;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!table)
		return -EINVAL;

	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
		(task->task_data_direction == DMA_TO_DEVICE) ?
			"Write" : "Read",
		task->task_lba, req->rd_size, req->rd_page);
#endif
	while (req->rd_size) {
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!se_mem) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&se_mem->se_list);

		length = (req->rd_size < sg_s[j].length) ?
			req->rd_size : sg_s[j].length;

		se_mem->se_page = sg_page(&sg_s[j++]);
		se_mem->se_len = length;

#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
			" se_page: %p se_off: %u se_len: %u\n", req->rd_page,
			req->rd_size, j, se_mem, se_mem->se_page,
			se_mem->se_off, se_mem->se_len);
#endif
		list_add_tail(&se_mem->se_list, se_mem_list);
		(*se_mem_cnt)++;

		req->rd_size -= length;
		if (!req->rd_size)
			goto out;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!table)
			return -EINVAL;

		sg_s = &table->sg_table[j = 0];
	}

out:
	task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
		*se_mem_cnt);
#endif
	return 0;
}

/* rd_DIRECT_do_se_mem_map():
 *
 * Entry point for the DIRECT se_mem mapping: compute page/offset for
 * the task's LBA, build the se_mem list, and optionally set up
 * task_sg chaining for fabrics that use it.
 */
static int rd_DIRECT_do_se_mem_map(
	struct se_task *task,
	struct list_head *se_mem_list,
	void *in_mem,
	struct se_mem *in_se_mem,
	struct se_mem **out_se_mem,
	u32 *se_mem_cnt,
	u32 *task_offset_in)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct rd_request *req = RD_REQ(task);
	u32 task_offset = *task_offset_in;
	unsigned long long lba;
	int ret;
	int block_size = task->se_dev->se_sub_dev->se_dev_attrib.block_size;

	lba = task->task_lba;
	req->rd_page = ((task->task_lba * block_size) / PAGE_SIZE);
	req->rd_offset = (do_div(lba, (PAGE_SIZE / block_size))) * block_size;
	req->rd_size = task->task_size;

	if (req->rd_offset)
		ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
				task_offset_in);
	else
		ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
				task_offset_in);

	if (ret < 0)
		return ret;

	if (cmd->se_tfo->task_sg_chaining == 0)
		return 0;
	/*
	 * Currently prevent writers from multiple HW fabrics doing
	 * pci_map_sg() to RD_DR's internal scatterlist memory.
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		printk(KERN_ERR "DMA_TO_DEVICE not supported for"
				" RAMDISK_DR with task_sg_chaining=1\n");
		return -ENOSYS;
	}
	/*
	 * Special case for if task_sg_chaining is enabled, then
	 * we setup struct se_task->task_sg[], as it will be used by
	 * transport_do_task_sg_chain() for creating chained SGLs
	 * across multiple struct se_task->task_sg[].
	 */
	ret = transport_init_task_sg(task,
			list_entry(cmd->t_task->t_mem_list->next,
				   struct se_mem, se_list),
			task_offset);
	if (ret <= 0)
		return ret;

	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
			list_entry(cmd->t_task->t_mem_list->next,
				   struct se_mem, se_list),
			out_se_mem, se_mem_cnt, task_offset_in);
}

/* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
 *
 * No data movement happens here; the mapping was already done in
 * rd_DIRECT_do_se_mem_map(), so just complete the task.
 */
static int rd_DIRECT_do_task(struct se_task *task)
{
	/*
	 * At this point the locally allocated RD tables have been mapped
	 * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
	 */
	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

/* rd_free_task(): (Part of se_subsystem_api_t template)
 *
 * Free the struct rd_request that embeds this task.
 */
static void rd_free_task(struct se_task *task)
{
	kfree(RD_REQ(task));
}

enum {
	Opt_rd_pages, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page,
	ssize_t count)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			printk(KERN_INFO "RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}
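
/*
 * Typical usage sketch (the path is illustrative; the exact configfs
 * layout depends on how the core HBA and device were created):
 *
 *   echo "rd_pages=32768" > /sys/kernel/config/target/core/rd_mcp_0/rd0/control
 *
 * With 4K pages that sizes the ramdisk at 32768 * 4096 = 128MB, and it
 * sets RDF_HAS_PAGE_COUNT so rd_check_configfs_dev_params() passes.
 */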

static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		printk(KERN_INFO "Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	return 0;
}

static ssize_t rd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n",
			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
			"rd_direct" : "rd_mcp");
	bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
			" SG_table_count: %u\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count);
	return bl;
}

/* rd_get_cdb(): (Part of se_subsystem_api_t template)
 *
 * Return the CDB storage embedded in this task's struct rd_request.
 */
static unsigned char *rd_get_cdb(struct se_task *task)
{
	struct rd_request *req = RD_REQ(task);

	return req->rd_scsi_cdb;
}

static u32 rd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 rd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = dev->dev_ptr;
	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->se_sub_dev->se_dev_attrib.block_size) - 1;

	return blocks_long;
}
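
/*
 * Worked example for rd_get_blocks() (illustrative, assuming 4K pages):
 * rd_pages=32768 with a 512 byte block_size gives
 * (32768 * 4096) / 512 - 1 = 262143, i.e. a last LBA of 262143 and
 * 262144 total blocks (128MB).
 */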

static struct se_subsystem_api rd_dr_template = {
	.name			= "rd_dr",
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.allocate_virtdevice	= rd_DIRECT_allocate_virtdevice,
	.create_virtdevice	= rd_DIRECT_create_virtdevice,
	.free_device		= rd_free_device,
	.alloc_task		= rd_alloc_task,
	.do_task		= rd_DIRECT_do_task,
	.free_task		= rd_free_task,
	.check_configfs_dev_params = rd_check_configfs_dev_params,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_cdb		= rd_get_cdb,
	.get_device_rev		= rd_get_device_rev,
	.get_device_type	= rd_get_device_type,
	.get_blocks		= rd_get_blocks,
	.do_se_mem_map		= rd_DIRECT_do_se_mem_map,
};

static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.allocate_virtdevice	= rd_MEMCPY_allocate_virtdevice,
	.create_virtdevice	= rd_MEMCPY_create_virtdevice,
	.free_device		= rd_free_device,
	.alloc_task		= rd_alloc_task,
	.do_task		= rd_MEMCPY_do_task,
	.free_task		= rd_free_task,
	.check_configfs_dev_params = rd_check_configfs_dev_params,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_cdb		= rd_get_cdb,
	.get_device_rev		= rd_get_device_rev,
	.get_device_type	= rd_get_device_type,
	.get_blocks		= rd_get_blocks,
};
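
/*
 * The two templates differ only in the data path: rd_dr exposes the
 * ramdisk's own pages to the fabric through do_se_mem_map() (no data
 * copy), while rd_mcp memcpy()s between the task's scatterlist and the
 * ramdisk scatterlists in rd_MEMCPY_do_task().
 */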

int __init rd_module_init(void)
{
	int ret;

	ret = transport_subsystem_register(&rd_dr_template);
	if (ret < 0)
		return ret;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0) {
		transport_subsystem_release(&rd_dr_template);
		return ret;
	}

	return 0;
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_dr_template);
	transport_subsystem_release(&rd_mcp_template);
}