/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

#include "target_core_rd.h"

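/*
 * rd_alloc_device() hands the core a pointer to the se_device embedded
 * in struct rd_dev, so container_of() here recovers the enclosing
 * ramdisk private state from the generic device.
 */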
static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}

/* rd_attach_hba(): (Part of the se_subsystem_api template)
 *
 * Allocate the per-HBA ramdisk host state and attach it to the HBA.
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

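/*
 * Free every backing page referenced by an array of scatterlist tables,
 * then the scatterlists and the table array itself.  Returns the number
 * of pages released so callers can log how much memory was freed.
 */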
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				u32 sg_table_count)
{
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}

/* rd_allocate_sgl_table():
 *
 * Allocate the scatterlist tables and backing pages for a ramdisk area,
 * filling every page with init_payload.
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct page *pg;
	struct scatterlist *sg;
	unsigned char *p;

	while (total_sg_needed) {
		unsigned int chain_entry = 0;

		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

#ifdef CONFIG_ARCH_HAS_SG_CHAIN
		/*
		 * Reserve extra element for chain entry
		 */
		if (sg_per_table < total_sg_needed)
			chain_entry = 1;
#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table + chain_entry);

#ifdef CONFIG_ARCH_HAS_SG_CHAIN
		if (i > 0) {
			sg_chain(sg_table[i - 1].sg_table,
				 max_sg_per_table + 1, sg);
		}
#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table) - 1;

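		/*
		 * Back each scatterlist entry with a freshly allocated
		 * page and fill it with init_payload (zeroes for data
		 * space, 0xff for DIF protection space).
		 */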
		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}

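/*
 * Allocate the data scatterlist tables for a ramdisk device.  Pages are
 * zero-filled; NULLIO devices skip the allocation entirely since their
 * I/O completes without ever touching backing memory.
 */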
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
			rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kcalloc(sg_tables, sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
			" scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);

	return 0;
}

static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_prot_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}

static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;
	/*
	 * Each logical block carries prot_length bytes of DIF metadata
	 * (an 8-byte tuple per block), so:
	 *
	 *   total_sg_needed = rd_page_count * (PAGE_SIZE / block_size) *
	 *                     prot_length / PAGE_SIZE
	 *                   = rd_page_count * prot_length / block_size
	 *
	 * The PAGE_SIZE factors cancel; add one page of padding for the
	 * integer-division rounding.
	 */
	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kcalloc(sg_tables, sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk protection"
			" scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}

static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}

static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)rd_dev->rd_page_count * PAGE_SIZE);

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}

static void rd_free_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}

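/*
 * Map a device page index to the scatterlist table holding it.  Each
 * table covers max_sg_per_table pages, so the table index is a simple
 * division; the range check guards against out-of-bounds page numbers.
 */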
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
		page);

	return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for prot page: %u\n",
		page);

	return NULL;
}

typedef sense_reason_t (*dif_verify)(struct se_cmd *, sector_t, unsigned int,
				     unsigned int, struct scatterlist *, int);

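/*
 * Locate the protection scatterlist covering the command's LBA range
 * and run the supplied DIF verify routine over it.  The starting prot
 * page and intra-page offset come from dividing the byte offset
 * (t_task_lba * prot_length) by PAGE_SIZE via do_div().
 */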
static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *prot_table;
	bool need_to_release = false;
	struct scatterlist *prot_sg;
	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
	u32 prot_offset, prot_page;
	u32 prot_npages __maybe_unused;
	u64 tmp;
	sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	tmp = cmd->t_task_lba * se_dev->prot_length;
	prot_offset = do_div(tmp, PAGE_SIZE);
	prot_page = tmp;

	prot_table = rd_get_prot_table(dev, prot_page);
	if (!prot_table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	prot_sg = &prot_table->sg_table[prot_page -
					prot_table->page_start_offset];

#ifndef CONFIG_ARCH_HAS_SG_CHAIN

	prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
				   PAGE_SIZE);

	/*
	 * Allocate a temporary contiguous scatterlist if the prot pages
	 * straddle multiple scatterlist tables, since without SG chaining
	 * the verify routine needs one flat array.
	 */
	if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
		int i;

		prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
		if (!prot_sg)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		need_to_release = true;
		sg_init_table(prot_sg, prot_npages);

		for (i = 0; i < prot_npages; i++) {
			if (prot_page + i > prot_table->page_end_offset) {
				prot_table = rd_get_prot_table(dev,
							       prot_page + i);
				if (!prot_table) {
					kfree(prot_sg);
					return rc;
				}
				sg_unmark_end(&prot_sg[i - 1]);
			}
			prot_sg[i] = prot_table->sg_table[prot_page + i -
						prot_table->page_start_offset];
		}
	}

#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */

	rc = dif_verify(cmd, cmd->t_task_lba, sectors, 0, prot_sg, prot_offset);
	if (need_to_release)
		kfree(prot_sg);

	return rc;
}

static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
		 dev->rd_dev_id,
		 data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
		 cmd->t_task_lba, rd_size, rd_page, rd_offset);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_TO_DEVICE) {
		rc = rd_do_prot_rw(cmd, sbc_dif_verify_write);
		if (rc)
			return rc;
	}

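	/*
	 * memcpy loop: walk the command's scatterlist with sg_miter while
	 * tracking the ramdisk's backing pages by hand, copying the
	 * smaller of the two current segment remainders each pass and
	 * hopping to the next sg table when a table boundary is crossed.
	 */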
	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
		       data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_FROM_DEVICE) {
		rc = rd_do_prot_rw(cmd, sbc_dif_verify_read);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

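/*
 * configfs device parameters: "rd_pages=<n>" sets the backing store
 * size in pages and is mandatory; "rd_nullio=1" makes the device
 * complete all I/O immediately without copying any data.
 */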
enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
			" SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO));
	return bl;
}

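/*
 * Report capacity as the index of the last addressable block (READ
 * CAPACITY semantics), hence the trailing "- 1".
 */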
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}

static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length,
				   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
	.execute_rw = rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

DEF_TB_DEFAULT_ATTRIBS(rd_mcp);

static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
	&rd_mcp_dev_attrib_emulate_model_alias.attr,
	&rd_mcp_dev_attrib_emulate_dpo.attr,
	&rd_mcp_dev_attrib_emulate_fua_write.attr,
	&rd_mcp_dev_attrib_emulate_fua_read.attr,
	&rd_mcp_dev_attrib_emulate_write_cache.attr,
	&rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
	&rd_mcp_dev_attrib_emulate_tas.attr,
	&rd_mcp_dev_attrib_emulate_tpu.attr,
	&rd_mcp_dev_attrib_emulate_tpws.attr,
	&rd_mcp_dev_attrib_emulate_caw.attr,
	&rd_mcp_dev_attrib_emulate_3pc.attr,
	&rd_mcp_dev_attrib_pi_prot_type.attr,
	&rd_mcp_dev_attrib_hw_pi_prot_type.attr,
	&rd_mcp_dev_attrib_pi_prot_format.attr,
	&rd_mcp_dev_attrib_enforce_pr_isids.attr,
	&rd_mcp_dev_attrib_is_nonrot.attr,
	&rd_mcp_dev_attrib_emulate_rest_reord.attr,
	&rd_mcp_dev_attrib_force_pr_aptpl.attr,
	&rd_mcp_dev_attrib_hw_block_size.attr,
	&rd_mcp_dev_attrib_block_size.attr,
	&rd_mcp_dev_attrib_hw_max_sectors.attr,
	&rd_mcp_dev_attrib_optimal_sectors.attr,
	&rd_mcp_dev_attrib_hw_queue_depth.attr,
	&rd_mcp_dev_attrib_queue_depth.attr,
	&rd_mcp_dev_attrib_max_unmap_lba_count.attr,
	&rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
	&rd_mcp_dev_attrib_unmap_granularity.attr,
	&rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
	&rd_mcp_dev_attrib_max_write_same_len.attr,
	NULL,
};

static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
};

int __init rd_module_init(void)
{
	struct target_backend_cits *tbc = &rd_mcp_template.tb_cits;
	int ret;

	target_core_setup_sub_cits(&rd_mcp_template);
	tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0)
		return ret;

	return 0;
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_mcp_template);
}