drivers/scsi/scsi_debug.c
1 /*
2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8 *
9 * This version is more generic, simulating a variable number of disks
10 * (or disk-like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
12 * SAS disks.
13 *
14 *
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
16 *
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
26 */
27
28 #include <linux/module.h>
29
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45
46 #include <net/checksum.h>
47
48 #include <asm/unaligned.h>
49
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
57
58 #include "sd.h"
59 #include "scsi_logging.h"
60
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
63
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define UNRECOVERED_READ_ERR 0x11
68 #define PARAMETER_LIST_LENGTH_ERR 0x1a
69 #define INVALID_OPCODE 0x20
70 #define ADDR_OUT_OF_RANGE 0x21
71 #define INVALID_COMMAND_OPCODE 0x20
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define POWERON_RESET 0x29
75 #define SAVING_PARAMS_UNSUP 0x39
76 #define TRANSPORT_PROBLEM 0x4b
77 #define THRESHOLD_EXCEEDED 0x5d
78 #define LOW_POWER_COND_ON 0x5e
79
80 /* Additional Sense Code Qualifier (ASCQ) */
81 #define ACK_NAK_TO 0x3
82
83 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
84
85 /* Default values for driver parameters */
86 #define DEF_NUM_HOST 1
87 #define DEF_NUM_TGTS 1
88 #define DEF_MAX_LUNS 1
89 /* With these defaults, this driver will make 1 host with 1 target
90 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
91 */
92 #define DEF_ATO 1
93 #define DEF_DELAY 1
94 #define DEF_DEV_SIZE_MB 8
95 #define DEF_DIF 0
96 #define DEF_DIX 0
97 #define DEF_D_SENSE 0
98 #define DEF_EVERY_NTH 0
99 #define DEF_FAKE_RW 0
100 #define DEF_GUARD 0
101 #define DEF_LBPU 0
102 #define DEF_LBPWS 0
103 #define DEF_LBPWS10 0
104 #define DEF_LOWEST_ALIGNED 0
105 #define DEF_NO_LUN_0 0
106 #define DEF_NUM_PARTS 0
107 #define DEF_OPTS 0
108 #define DEF_OPT_BLKS 64
109 #define DEF_PHYSBLK_EXP 0
110 #define DEF_PTYPE 0
111 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
112 #define DEF_SECTOR_SIZE 512
113 #define DEF_UNMAP_ALIGNMENT 0
114 #define DEF_UNMAP_GRANULARITY 1
115 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
116 #define DEF_UNMAP_MAX_DESC 256
117 #define DEF_VIRTUAL_GB 0
118 #define DEF_VPD_USE_HOSTNO 1
119 #define DEF_WRITESAME_LENGTH 0xFFFF
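/* These defaults can normally be overridden when the module is loaded, e.g.
 * "modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4" (assuming the
 * module parameter names match the lower-case forms of the DEF_* names above). */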
120
121 /* bit mask values for scsi_debug_opts */
122 #define SCSI_DEBUG_OPT_NOISE 1
123 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
124 #define SCSI_DEBUG_OPT_TIMEOUT 4
125 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
126 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
127 #define SCSI_DEBUG_OPT_DIF_ERR 32
128 #define SCSI_DEBUG_OPT_DIX_ERR 64
129 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
130 /* When "every_nth" > 0 then modulo "every_nth" commands:
131 * - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
132 * - a RECOVERED_ERROR is simulated on successful read and write
133 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
134 * - a TRANSPORT_ERROR is simulated on successful read and write
135 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
136 *
137 * When "every_nth" < 0 then after "- every_nth" commands:
138 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
139 * - a RECOVERED_ERROR is simulated on successful read and write
140 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
141 * - a TRANSPORT_ERROR is simulated on successful read and write
142 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
143 * This will continue until some other action occurs (e.g. the user
144 * writing a new value (other than -1 or 1) to every_nth via sysfs).
145 */
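/* For example (assuming the usual "opts" and "every_nth" module parameter
 * names), "modprobe scsi_debug opts=4 every_nth=100" should simulate a
 * missing response (4 == SCSI_DEBUG_OPT_TIMEOUT) on every 100th command. */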
146
147 /* When SCSI_DEBUG_OPT_MEDIUM_ERR is set in scsi_debug_opts, a medium error
148 * is simulated at this sector on read commands: */
149 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
150 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
151
152 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
153 * or "peripheral device" addressing (value 0) */
154 #define SAM2_LUN_ADDRESS_METHOD 0
155 #define SAM2_WLUN_REPORT_LUNS 0xc101
156
157 /* Can queue up to this number of commands. Typically commands that
158 * have a non-zero delay are queued. */
159 #define SCSI_DEBUG_CANQUEUE 255
160
161 static int scsi_debug_add_host = DEF_NUM_HOST;
162 static int scsi_debug_ato = DEF_ATO;
163 static int scsi_debug_delay = DEF_DELAY;
164 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
165 static int scsi_debug_dif = DEF_DIF;
166 static int scsi_debug_dix = DEF_DIX;
167 static int scsi_debug_dsense = DEF_D_SENSE;
168 static int scsi_debug_every_nth = DEF_EVERY_NTH;
169 static int scsi_debug_fake_rw = DEF_FAKE_RW;
170 static int scsi_debug_guard = DEF_GUARD;
171 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
172 static int scsi_debug_max_luns = DEF_MAX_LUNS;
173 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
174 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
175 static int scsi_debug_no_uld = 0;
176 static int scsi_debug_num_parts = DEF_NUM_PARTS;
177 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
178 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
179 static int scsi_debug_opts = DEF_OPTS;
180 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
181 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
182 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
183 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
184 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
185 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
186 static unsigned int scsi_debug_lbpu = DEF_LBPU;
187 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
188 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
189 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
190 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
191 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
192 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
193 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
194
195 static int scsi_debug_cmnd_count = 0;
196
197 #define DEV_READONLY(TGT) (0)
198 #define DEV_REMOVEABLE(TGT) (0)
199
200 static unsigned int sdebug_store_sectors;
201 static sector_t sdebug_capacity; /* in sectors */
202
203 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
204 may still need them */
205 static int sdebug_heads; /* heads per disk */
206 static int sdebug_cylinders_per; /* cylinders per surface */
207 static int sdebug_sectors_per; /* sectors per cylinder */
208
209 #define SDEBUG_MAX_PARTS 4
210
211 #define SDEBUG_SENSE_LEN 32
212
213 #define SCSI_DEBUG_MAX_CMD_LEN 32
214
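/* Non-zero when any logical block provisioning (LBP) mechanism is enabled:
 * UNMAP, WRITE SAME(16) or WRITE SAME(10). */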
215 static unsigned int scsi_debug_lbp(void)
216 {
217 return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
218 }
219
220 struct sdebug_dev_info {
221 struct list_head dev_list;
222 unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
223 unsigned int channel;
224 unsigned int target;
225 unsigned int lun;
226 struct sdebug_host_info *sdbg_host;
227 unsigned int wlun;
228 char reset;
229 char stopped;
230 char used;
231 };
232
233 struct sdebug_host_info {
234 struct list_head host_list;
235 struct Scsi_Host *shost;
236 struct device dev;
237 struct list_head dev_info_list;
238 };
239
240 #define to_sdebug_host(d) \
241 container_of(d, struct sdebug_host_info, dev)
242
243 static LIST_HEAD(sdebug_host_list);
244 static DEFINE_SPINLOCK(sdebug_host_list_lock);
245
246 typedef void (* done_funct_t) (struct scsi_cmnd *);
247
248 struct sdebug_queued_cmd {
249 int in_use;
250 struct timer_list cmnd_timer;
251 done_funct_t done_funct;
252 struct scsi_cmnd * a_cmnd;
253 int scsi_result;
254 };
255 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
256
257 static unsigned char * fake_storep; /* ramdisk storage */
258 static unsigned char *dif_storep; /* protection info */
259 static void *map_storep; /* provisioning map */
260
261 static unsigned long map_size;
262 static int num_aborts = 0;
263 static int num_dev_resets = 0;
264 static int num_bus_resets = 0;
265 static int num_host_resets = 0;
266 static int dix_writes;
267 static int dix_reads;
268 static int dif_errors;
269
270 static DEFINE_SPINLOCK(queued_arr_lock);
271 static DEFINE_RWLOCK(atomic_rw);
272
273 static char sdebug_proc_name[] = "scsi_debug";
274
275 static struct bus_type pseudo_lld_bus;
276
277 static inline sector_t dif_offset(sector_t sector)
278 {
279 return sector << 3;
280 }
281
282 static struct device_driver sdebug_driverfs_driver = {
283 .name = sdebug_proc_name,
284 .bus = &pseudo_lld_bus,
285 };
286
287 static const int check_condition_result =
288 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
289
290 static const int illegal_condition_result =
291 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
292
293 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
294 0, 0, 0x2, 0x4b};
295 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
296 0, 0, 0x0, 0x0};
297
298 static int sdebug_add_adapter(void);
299 static void sdebug_remove_adapter(void);
300
301 static void sdebug_max_tgts_luns(void)
302 {
303 struct sdebug_host_info *sdbg_host;
304 struct Scsi_Host *hpnt;
305
306 spin_lock(&sdebug_host_list_lock);
307 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
308 hpnt = sdbg_host->shost;
309 if ((hpnt->this_id >= 0) &&
310 (scsi_debug_num_tgts > hpnt->this_id))
311 hpnt->max_id = scsi_debug_num_tgts + 1;
312 else
313 hpnt->max_id = scsi_debug_num_tgts;
314 /* scsi_debug_max_luns; */
315 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
316 }
317 spin_unlock(&sdebug_host_list_lock);
318 }
319
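/* Build sense data for this device in devip->sense_buff, using descriptor
 * or fixed format according to scsi_debug_dsense. */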
320 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
321 int asc, int asq)
322 {
323 unsigned char *sbuff;
324
325 sbuff = devip->sense_buff;
326 memset(sbuff, 0, SDEBUG_SENSE_LEN);
327
328 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
329
330 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
331 printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
332 "[0x%x,0x%x,0x%x]\n", key, asc, asq);
333 }
334
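/* Decode the starting LBA, transfer length and (for 32 byte CDBs) the
 * expected initial logical block reference tag from the CDB. */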
335 static void get_data_transfer_info(unsigned char *cmd,
336 unsigned long long *lba, unsigned int *num,
337 u32 *ei_lba)
338 {
339 *ei_lba = 0;
340
341 switch (*cmd) {
342 case VARIABLE_LENGTH_CMD:
343 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
344 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
345 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
346 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
347
348 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
349 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
350
351 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
352 (u32)cmd[28] << 24;
353 break;
354
355 case WRITE_SAME_16:
356 case WRITE_16:
357 case READ_16:
358 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
359 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
360 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
361 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
362
363 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
364 (u32)cmd[10] << 24;
365 break;
366 case WRITE_12:
367 case READ_12:
368 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
369 (u32)cmd[2] << 24;
370
371 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
372 (u32)cmd[6] << 24;
373 break;
374 case WRITE_SAME:
375 case WRITE_10:
376 case READ_10:
377 case XDWRITEREAD_10:
378 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
379 (u32)cmd[2] << 24;
380
381 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
382 break;
383 case WRITE_6:
384 case READ_6:
385 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
386 (u32)(cmd[1] & 0x1f) << 16;
387 *num = (0 == cmd[4]) ? 256 : cmd[4];
388 break;
389 default:
390 break;
391 }
392 }
393
394 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
395 {
396 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
397 printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
398 }
399 return -EINVAL;
400 /* return -ENOTTY; // correct return but upsets fdisk */
401 }
402
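/* Report a one-shot POWER ON RESET unit attention after a reset; when
 * reset_only is 0 also report NOT READY if the unit has been stopped. */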
403 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
404 struct sdebug_dev_info * devip)
405 {
406 if (devip->reset) {
407 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
408 printk(KERN_INFO "scsi_debug: Reporting Unit "
409 "attention: power on reset\n");
410 devip->reset = 0;
411 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
412 return check_condition_result;
413 }
414 if ((0 == reset_only) && devip->stopped) {
415 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
416 printk(KERN_INFO "scsi_debug: Reporting Not "
417 "ready: initializing command required\n");
418 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
419 0x2);
420 return check_condition_result;
421 }
422 return 0;
423 }
424
425 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
426 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
427 int arr_len)
428 {
429 int act_len;
430 struct scsi_data_buffer *sdb = scsi_in(scp);
431
432 if (!sdb->length)
433 return 0;
434 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
435 return (DID_ERROR << 16);
436
437 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
438 arr, arr_len);
439 if (sdb->resid)
440 sdb->resid -= act_len;
441 else
442 sdb->resid = scsi_bufflen(scp) - act_len;
443
444 return 0;
445 }
446
447 /* Returns number of bytes fetched into 'arr' or -1 if error. */
448 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
449 int arr_len)
450 {
451 if (!scsi_bufflen(scp))
452 return 0;
453 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
454 return -1;
455
456 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
457 }
458
459
460 static const char * inq_vendor_id = "Linux ";
461 static const char * inq_product_id = "scsi_debug ";
462 static const char * inq_product_rev = "0004";
463
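/* Device Identification VPD page (0x83): T10 vendor id, NAA-5 logical unit
 * and target port identifiers, target port group, target device id and a
 * SCSI name string, all with faked values. */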
464 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
465 int target_dev_id, int dev_id_num,
466 const char * dev_id_str,
467 int dev_id_str_len)
468 {
469 int num, port_a;
470 char b[32];
471
472 port_a = target_dev_id + 1;
473 /* T10 vendor identifier field format (faked) */
474 arr[0] = 0x2; /* ASCII */
475 arr[1] = 0x1;
476 arr[2] = 0x0;
477 memcpy(&arr[4], inq_vendor_id, 8);
478 memcpy(&arr[12], inq_product_id, 16);
479 memcpy(&arr[28], dev_id_str, dev_id_str_len);
480 num = 8 + 16 + dev_id_str_len;
481 arr[3] = num;
482 num += 4;
483 if (dev_id_num >= 0) {
484 /* NAA-5, Logical unit identifier (binary) */
485 arr[num++] = 0x1; /* binary (not necessarily sas) */
486 arr[num++] = 0x3; /* PIV=0, lu, naa */
487 arr[num++] = 0x0;
488 arr[num++] = 0x8;
489 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
490 arr[num++] = 0x33;
491 arr[num++] = 0x33;
492 arr[num++] = 0x30;
493 arr[num++] = (dev_id_num >> 24);
494 arr[num++] = (dev_id_num >> 16) & 0xff;
495 arr[num++] = (dev_id_num >> 8) & 0xff;
496 arr[num++] = dev_id_num & 0xff;
497 /* Target relative port number */
498 arr[num++] = 0x61; /* proto=sas, binary */
499 arr[num++] = 0x94; /* PIV=1, target port, rel port */
500 arr[num++] = 0x0; /* reserved */
501 arr[num++] = 0x4; /* length */
502 arr[num++] = 0x0; /* reserved */
503 arr[num++] = 0x0; /* reserved */
504 arr[num++] = 0x0;
505 arr[num++] = 0x1; /* relative port A */
506 }
507 /* NAA-5, Target port identifier */
508 arr[num++] = 0x61; /* proto=sas, binary */
509 arr[num++] = 0x93; /* piv=1, target port, naa */
510 arr[num++] = 0x0;
511 arr[num++] = 0x8;
512 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
513 arr[num++] = 0x22;
514 arr[num++] = 0x22;
515 arr[num++] = 0x20;
516 arr[num++] = (port_a >> 24);
517 arr[num++] = (port_a >> 16) & 0xff;
518 arr[num++] = (port_a >> 8) & 0xff;
519 arr[num++] = port_a & 0xff;
520 /* NAA-5, Target port group identifier */
521 arr[num++] = 0x61; /* proto=sas, binary */
522 arr[num++] = 0x95; /* piv=1, target port group id */
523 arr[num++] = 0x0;
524 arr[num++] = 0x4;
525 arr[num++] = 0;
526 arr[num++] = 0;
527 arr[num++] = (port_group_id >> 8) & 0xff;
528 arr[num++] = port_group_id & 0xff;
529 /* NAA-5, Target device identifier */
530 arr[num++] = 0x61; /* proto=sas, binary */
531 arr[num++] = 0xa3; /* piv=1, target device, naa */
532 arr[num++] = 0x0;
533 arr[num++] = 0x8;
534 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
535 arr[num++] = 0x22;
536 arr[num++] = 0x22;
537 arr[num++] = 0x20;
538 arr[num++] = (target_dev_id >> 24);
539 arr[num++] = (target_dev_id >> 16) & 0xff;
540 arr[num++] = (target_dev_id >> 8) & 0xff;
541 arr[num++] = target_dev_id & 0xff;
542 /* SCSI name string: Target device identifier */
543 arr[num++] = 0x63; /* proto=sas, UTF-8 */
544 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
545 arr[num++] = 0x0;
546 arr[num++] = 24;
547 memcpy(arr + num, "naa.52222220", 12);
548 num += 12;
549 snprintf(b, sizeof(b), "%08X", target_dev_id);
550 memcpy(arr + num, b, 8);
551 num += 8;
552 memset(arr + num, 0, 4);
553 num += 4;
554 return num;
555 }
556
557
558 static unsigned char vpd84_data[] = {
559 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
560 0x22,0x22,0x22,0x0,0xbb,0x1,
561 0x22,0x22,0x22,0x0,0xbb,0x2,
562 };
563
564 static int inquiry_evpd_84(unsigned char * arr)
565 {
566 memcpy(arr, vpd84_data, sizeof(vpd84_data));
567 return sizeof(vpd84_data);
568 }
569
570 static int inquiry_evpd_85(unsigned char * arr)
571 {
572 int num = 0;
573 const char * na1 = "https://www.kernel.org/config";
574 const char * na2 = "http://www.kernel.org/log";
575 int plen, olen;
576
577 arr[num++] = 0x1; /* lu, storage config */
578 arr[num++] = 0x0; /* reserved */
579 arr[num++] = 0x0;
580 olen = strlen(na1);
581 plen = olen + 1;
582 if (plen % 4)
583 plen = ((plen / 4) + 1) * 4;
584 arr[num++] = plen; /* length, null terminated, padded */
585 memcpy(arr + num, na1, olen);
586 memset(arr + num + olen, 0, plen - olen);
587 num += plen;
588
589 arr[num++] = 0x4; /* lu, logging */
590 arr[num++] = 0x0; /* reserved */
591 arr[num++] = 0x0;
592 olen = strlen(na2);
593 plen = olen + 1;
594 if (plen % 4)
595 plen = ((plen / 4) + 1) * 4;
596 arr[num++] = plen; /* length, null terminated, padded */
597 memcpy(arr + num, na2, olen);
598 memset(arr + num + olen, 0, plen - olen);
599 num += plen;
600
601 return num;
602 }
603
604 /* SCSI ports VPD page */
605 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
606 {
607 int num = 0;
608 int port_a, port_b;
609
610 port_a = target_dev_id + 1;
611 port_b = port_a + 1;
612 arr[num++] = 0x0; /* reserved */
613 arr[num++] = 0x0; /* reserved */
614 arr[num++] = 0x0;
615 arr[num++] = 0x1; /* relative port 1 (primary) */
616 memset(arr + num, 0, 6);
617 num += 6;
618 arr[num++] = 0x0;
619 arr[num++] = 12; /* length tp descriptor */
620 /* naa-5 target port identifier (A) */
621 arr[num++] = 0x61; /* proto=sas, binary */
622 arr[num++] = 0x93; /* PIV=1, target port, NAA */
623 arr[num++] = 0x0; /* reserved */
624 arr[num++] = 0x8; /* length */
625 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
626 arr[num++] = 0x22;
627 arr[num++] = 0x22;
628 arr[num++] = 0x20;
629 arr[num++] = (port_a >> 24);
630 arr[num++] = (port_a >> 16) & 0xff;
631 arr[num++] = (port_a >> 8) & 0xff;
632 arr[num++] = port_a & 0xff;
633
634 arr[num++] = 0x0; /* reserved */
635 arr[num++] = 0x0; /* reserved */
636 arr[num++] = 0x0;
637 arr[num++] = 0x2; /* relative port 2 (secondary) */
638 memset(arr + num, 0, 6);
639 num += 6;
640 arr[num++] = 0x0;
641 arr[num++] = 12; /* length tp descriptor */
642 /* naa-5 target port identifier (B) */
643 arr[num++] = 0x61; /* proto=sas, binary */
644 arr[num++] = 0x93; /* PIV=1, target port, NAA */
645 arr[num++] = 0x0; /* reserved */
646 arr[num++] = 0x8; /* length */
647 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
648 arr[num++] = 0x22;
649 arr[num++] = 0x22;
650 arr[num++] = 0x20;
651 arr[num++] = (port_b >> 24);
652 arr[num++] = (port_b >> 16) & 0xff;
653 arr[num++] = (port_b >> 8) & 0xff;
654 arr[num++] = port_b & 0xff;
655
656 return num;
657 }
658
659
660 static unsigned char vpd89_data[] = {
661 /* from 4th byte */ 0,0,0,0,
662 'l','i','n','u','x',' ',' ',' ',
663 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
664 '1','2','3','4',
665 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
666 0xec,0,0,0,
667 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
668 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
669 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
670 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
671 0x53,0x41,
672 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
673 0x20,0x20,
674 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
675 0x10,0x80,
676 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
677 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
678 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
679 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
680 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
681 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
682 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
683 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
684 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
685 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
686 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
687 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
688 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
689 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
690 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
691 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
692 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
693 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
695 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
696 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
697 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
698 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
699 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
700 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
701 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
702 };
703
704 static int inquiry_evpd_89(unsigned char * arr)
705 {
706 memcpy(arr, vpd89_data, sizeof(vpd89_data));
707 return sizeof(vpd89_data);
708 }
709
710
711 /* Block limits VPD page (SBC-3) */
712 static unsigned char vpdb0_data[] = {
713 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
714 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
715 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
716 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
717 };
718
719 static int inquiry_evpd_b0(unsigned char * arr)
720 {
721 unsigned int gran;
722
723 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
724
725 /* Optimal transfer length granularity */
726 gran = 1 << scsi_debug_physblk_exp;
727 arr[2] = (gran >> 8) & 0xff;
728 arr[3] = gran & 0xff;
729
730 /* Maximum Transfer Length */
731 if (sdebug_store_sectors > 0x400) {
732 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
733 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
734 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
735 arr[7] = sdebug_store_sectors & 0xff;
736 }
737
738 /* Optimal Transfer Length */
739 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
740
741 if (scsi_debug_lbpu) {
742 /* Maximum Unmap LBA Count */
743 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
744
745 /* Maximum Unmap Block Descriptor Count */
746 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
747 }
748
749 /* Unmap Granularity Alignment */
750 if (scsi_debug_unmap_alignment) {
751 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
752 arr[28] |= 0x80; /* UGAVALID */
753 }
754
755 /* Optimal Unmap Granularity */
756 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
757
758 /* Maximum WRITE SAME Length */
759 put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
760
761 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
764 }
765
766 /* Block device characteristics VPD page (SBC-3) */
767 static int inquiry_evpd_b1(unsigned char *arr)
768 {
769 memset(arr, 0, 0x3c);
770 arr[0] = 0;
771 arr[1] = 1; /* non rotating medium (e.g. solid state) */
772 arr[2] = 0;
773 arr[3] = 5; /* less than 1.8" */
774
775 return 0x3c;
776 }
777
778 /* Thin provisioning VPD page (SBC-3) */
779 static int inquiry_evpd_b2(unsigned char *arr)
780 {
781 memset(arr, 0, 0x8);
782 arr[0] = 0; /* threshold exponent */
783
784 if (scsi_debug_lbpu)
785 arr[1] = 1 << 7;
786
787 if (scsi_debug_lbpws)
788 arr[1] |= 1 << 6;
789
790 if (scsi_debug_lbpws10)
791 arr[1] |= 1 << 5;
792
793 return 0x8;
794 }
795
796 #define SDEBUG_LONG_INQ_SZ 96
797 #define SDEBUG_MAX_INQ_ARR_SZ 584
798
799 static int resp_inquiry(struct scsi_cmnd * scp, int target,
800 struct sdebug_dev_info * devip)
801 {
802 unsigned char pq_pdt;
803 unsigned char * arr;
804 unsigned char *cmd = (unsigned char *)scp->cmnd;
805 int alloc_len, n, ret;
806
807 alloc_len = (cmd[3] << 8) + cmd[4];
808 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
809 if (! arr)
810 return DID_REQUEUE << 16;
811 if (devip->wlun)
812 pq_pdt = 0x1e; /* present, wlun */
813 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
814 pq_pdt = 0x7f; /* not present, no device type */
815 else
816 pq_pdt = (scsi_debug_ptype & 0x1f);
817 arr[0] = pq_pdt;
818 if (0x2 & cmd[1]) { /* CMDDT bit set */
819 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
820 0);
821 kfree(arr);
822 return check_condition_result;
823 } else if (0x1 & cmd[1]) { /* EVPD bit set */
824 int lu_id_num, port_group_id, target_dev_id, len;
825 char lu_id_str[6];
826 int host_no = devip->sdbg_host->shost->host_no;
827
828 port_group_id = (((host_no + 1) & 0x7f) << 8) +
829 (devip->channel & 0x7f);
830 if (0 == scsi_debug_vpd_use_hostno)
831 host_no = 0;
832 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
833 (devip->target * 1000) + devip->lun);
834 target_dev_id = ((host_no + 1) * 2000) +
835 (devip->target * 1000) - 3;
836 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
837 if (0 == cmd[2]) { /* supported vital product data pages */
838 arr[1] = cmd[2]; /*sanity */
839 n = 4;
840 arr[n++] = 0x0; /* this page */
841 arr[n++] = 0x80; /* unit serial number */
842 arr[n++] = 0x83; /* device identification */
843 arr[n++] = 0x84; /* software interface ident. */
844 arr[n++] = 0x85; /* management network addresses */
845 arr[n++] = 0x86; /* extended inquiry */
846 arr[n++] = 0x87; /* mode page policy */
847 arr[n++] = 0x88; /* SCSI ports */
848 arr[n++] = 0x89; /* ATA information */
849 arr[n++] = 0xb0; /* Block limits (SBC) */
850 arr[n++] = 0xb1; /* Block characteristics (SBC) */
851 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
852 arr[n++] = 0xb2;
853 arr[3] = n - 4; /* number of supported VPD pages */
854 } else if (0x80 == cmd[2]) { /* unit serial number */
855 arr[1] = cmd[2]; /*sanity */
856 arr[3] = len;
857 memcpy(&arr[4], lu_id_str, len);
858 } else if (0x83 == cmd[2]) { /* device identification */
859 arr[1] = cmd[2]; /*sanity */
860 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
861 target_dev_id, lu_id_num,
862 lu_id_str, len);
863 } else if (0x84 == cmd[2]) { /* Software interface ident. */
864 arr[1] = cmd[2]; /*sanity */
865 arr[3] = inquiry_evpd_84(&arr[4]);
866 } else if (0x85 == cmd[2]) { /* Management network addresses */
867 arr[1] = cmd[2]; /*sanity */
868 arr[3] = inquiry_evpd_85(&arr[4]);
869 } else if (0x86 == cmd[2]) { /* extended inquiry */
870 arr[1] = cmd[2]; /*sanity */
871 arr[3] = 0x3c; /* number of following entries */
872 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
873 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
874 else if (scsi_debug_dif)
875 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
876 else
877 arr[4] = 0x0; /* no protection stuff */
878 arr[5] = 0x7; /* head of q, ordered + simple q's */
879 } else if (0x87 == cmd[2]) { /* mode page policy */
880 arr[1] = cmd[2]; /*sanity */
881 arr[3] = 0x8; /* number of following entries */
882 arr[4] = 0x2; /* disconnect-reconnect mp */
883 arr[6] = 0x80; /* mlus, shared */
884 arr[8] = 0x18; /* protocol specific lu */
885 arr[10] = 0x82; /* mlus, per initiator port */
886 } else if (0x88 == cmd[2]) { /* SCSI Ports */
887 arr[1] = cmd[2]; /*sanity */
888 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
889 } else if (0x89 == cmd[2]) { /* ATA information */
890 arr[1] = cmd[2]; /*sanity */
891 n = inquiry_evpd_89(&arr[4]);
892 arr[2] = (n >> 8);
893 arr[3] = (n & 0xff);
894 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
895 arr[1] = cmd[2]; /*sanity */
896 arr[3] = inquiry_evpd_b0(&arr[4]);
897 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
898 arr[1] = cmd[2]; /*sanity */
899 arr[3] = inquiry_evpd_b1(&arr[4]);
900 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
901 arr[1] = cmd[2]; /*sanity */
902 arr[3] = inquiry_evpd_b2(&arr[4]);
903 } else {
904 /* Illegal request, invalid field in cdb */
905 mk_sense_buffer(devip, ILLEGAL_REQUEST,
906 INVALID_FIELD_IN_CDB, 0);
907 kfree(arr);
908 return check_condition_result;
909 }
910 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
911 ret = fill_from_dev_buffer(scp, arr,
912 min(len, SDEBUG_MAX_INQ_ARR_SZ));
913 kfree(arr);
914 return ret;
915 }
916 /* drops through here for a standard inquiry */
917 arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0; /* Removable disk */
918 arr[2] = scsi_debug_scsi_level;
919 arr[3] = 2; /* response_data_format==2 */
920 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
921 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
922 if (0 == scsi_debug_vpd_use_hostno)
923 arr[5] = 0x10; /* claim: implicit TGPS */
924 arr[6] = 0x10; /* claim: MultiP */
925 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
926 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
927 memcpy(&arr[8], inq_vendor_id, 8);
928 memcpy(&arr[16], inq_product_id, 16);
929 memcpy(&arr[32], inq_product_rev, 4);
930 /* version descriptors (2 bytes each) follow */
931 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
932 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
933 n = 62;
934 if (scsi_debug_ptype == 0) {
935 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
936 } else if (scsi_debug_ptype == 1) {
937 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
938 }
939 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
940 ret = fill_from_dev_buffer(scp, arr,
941 min(alloc_len, SDEBUG_LONG_INQ_SZ));
942 kfree(arr);
943 return ret;
944 }
945
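/* REQUEST SENSE: return (then clear) the stored sense data, or a
 * THRESHOLD EXCEEDED informational exception when the IEC mode page has
 * TEST set and MRIE == 6. */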
946 static int resp_requests(struct scsi_cmnd * scp,
947 struct sdebug_dev_info * devip)
948 {
949 unsigned char * sbuff;
950 unsigned char *cmd = (unsigned char *)scp->cmnd;
951 unsigned char arr[SDEBUG_SENSE_LEN];
952 int want_dsense;
953 int len = 18;
954
955 memset(arr, 0, sizeof(arr));
956 if (devip->reset == 1)
957 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
958 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
959 sbuff = devip->sense_buff;
960 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
961 if (want_dsense) {
962 arr[0] = 0x72;
963 arr[1] = 0x0; /* NO_SENSE in sense_key */
964 arr[2] = THRESHOLD_EXCEEDED;
965 arr[3] = 0xff; /* TEST set and MRIE==6 */
966 } else {
967 arr[0] = 0x70;
968 arr[2] = 0x0; /* NO_SENSE in sense_key */
969 arr[7] = 0xa; /* 18 byte sense buffer */
970 arr[12] = THRESHOLD_EXCEEDED;
971 arr[13] = 0xff; /* TEST set and MRIE==6 */
972 }
973 } else {
974 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
975 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
976 /* DESC bit set and sense_buff in fixed format */
977 memset(arr, 0, sizeof(arr));
978 arr[0] = 0x72;
979 arr[1] = sbuff[2]; /* sense key */
980 arr[2] = sbuff[12]; /* asc */
981 arr[3] = sbuff[13]; /* ascq */
982 len = 8;
983 }
984 }
985 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
986 return fill_from_dev_buffer(scp, arr, len);
987 }
988
989 static int resp_start_stop(struct scsi_cmnd * scp,
990 struct sdebug_dev_info * devip)
991 {
992 unsigned char *cmd = (unsigned char *)scp->cmnd;
993 int power_cond, errsts, start;
994
995 if ((errsts = check_readiness(scp, 1, devip)))
996 return errsts;
997 power_cond = (cmd[4] & 0xf0) >> 4;
998 if (power_cond) {
999 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1000 0);
1001 return check_condition_result;
1002 }
1003 start = cmd[4] & 1;
1004 if (start == devip->stopped)
1005 devip->stopped = !start;
1006 return 0;
1007 }
1008
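/* Capacity in sectors as reported to the initiator. A non-zero virtual_gb
 * advertises more than the RAM store actually holds; accesses then wrap
 * within the store (see do_device_access()). */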
1009 static sector_t get_sdebug_capacity(void)
1010 {
1011 if (scsi_debug_virtual_gb > 0)
1012 return (sector_t)scsi_debug_virtual_gb *
1013 (1073741824 / scsi_debug_sector_size);
1014 else
1015 return sdebug_store_sectors;
1016 }
1017
1018 #define SDEBUG_READCAP_ARR_SZ 8
1019 static int resp_readcap(struct scsi_cmnd * scp,
1020 struct sdebug_dev_info * devip)
1021 {
1022 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1023 unsigned int capac;
1024 int errsts;
1025
1026 if ((errsts = check_readiness(scp, 1, devip)))
1027 return errsts;
1028 /* following just in case virtual_gb changed */
1029 sdebug_capacity = get_sdebug_capacity();
1030 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1031 if (sdebug_capacity < 0xffffffff) {
1032 capac = (unsigned int)sdebug_capacity - 1;
1033 arr[0] = (capac >> 24);
1034 arr[1] = (capac >> 16) & 0xff;
1035 arr[2] = (capac >> 8) & 0xff;
1036 arr[3] = capac & 0xff;
1037 } else {
1038 arr[0] = 0xff;
1039 arr[1] = 0xff;
1040 arr[2] = 0xff;
1041 arr[3] = 0xff;
1042 }
1043 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1044 arr[7] = scsi_debug_sector_size & 0xff;
1045 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1046 }
1047
1048 #define SDEBUG_READCAP16_ARR_SZ 32
1049 static int resp_readcap16(struct scsi_cmnd * scp,
1050 struct sdebug_dev_info * devip)
1051 {
1052 unsigned char *cmd = (unsigned char *)scp->cmnd;
1053 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1054 unsigned long long capac;
1055 int errsts, k, alloc_len;
1056
1057 if ((errsts = check_readiness(scp, 1, devip)))
1058 return errsts;
1059 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1060 + cmd[13]);
1061 /* following just in case virtual_gb changed */
1062 sdebug_capacity = get_sdebug_capacity();
1063 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1064 capac = sdebug_capacity - 1;
1065 for (k = 0; k < 8; ++k, capac >>= 8)
1066 arr[7 - k] = capac & 0xff;
1067 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1068 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1069 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1070 arr[11] = scsi_debug_sector_size & 0xff;
1071 arr[13] = scsi_debug_physblk_exp & 0xf;
1072 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1073
1074 if (scsi_debug_lbp())
1075 arr[14] |= 0x80; /* LBPME */
1076
1077 arr[15] = scsi_debug_lowest_aligned & 0xff;
1078
1079 if (scsi_debug_dif) {
1080 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1081 arr[12] |= 1; /* PROT_EN */
1082 }
1083
1084 return fill_from_dev_buffer(scp, arr,
1085 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1086 }
1087
1088 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1089
1090 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1091 struct sdebug_dev_info * devip)
1092 {
1093 unsigned char *cmd = (unsigned char *)scp->cmnd;
1094 unsigned char * arr;
1095 int host_no = devip->sdbg_host->shost->host_no;
1096 int n, ret, alen, rlen;
1097 int port_group_a, port_group_b, port_a, port_b;
1098
1099 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1100 + cmd[9]);
1101
1102 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1103 if (! arr)
1104 return DID_REQUEUE << 16;
1105 /*
1106 * EVPD page 0x88 states we have two ports, one
1107 * real and a fake port with no device connected.
1108 * So we create two port groups with one port each
1109 * and set the group with port B to unavailable.
1110 */
1111 port_a = 0x1; /* relative port A */
1112 port_b = 0x2; /* relative port B */
1113 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1114 (devip->channel & 0x7f);
1115 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1116 (devip->channel & 0x7f) + 0x80;
1117
1118 /*
1119 * The asymmetric access state is cycled according to the host_id.
1120 */
1121 n = 4;
1122 if (0 == scsi_debug_vpd_use_hostno) {
1123 arr[n++] = host_no % 3; /* Asymm access state */
1124 arr[n++] = 0x0F; /* claim: all states are supported */
1125 } else {
1126 arr[n++] = 0x0; /* Active/Optimized path */
1127 arr[n++] = 0x01; /* claim: only support active/optimized paths */
1128 }
1129 arr[n++] = (port_group_a >> 8) & 0xff;
1130 arr[n++] = port_group_a & 0xff;
1131 arr[n++] = 0; /* Reserved */
1132 arr[n++] = 0; /* Status code */
1133 arr[n++] = 0; /* Vendor unique */
1134 arr[n++] = 0x1; /* One port per group */
1135 arr[n++] = 0; /* Reserved */
1136 arr[n++] = 0; /* Reserved */
1137 arr[n++] = (port_a >> 8) & 0xff;
1138 arr[n++] = port_a & 0xff;
1139 arr[n++] = 3; /* Port unavailable */
1140 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1141 arr[n++] = (port_group_b >> 8) & 0xff;
1142 arr[n++] = port_group_b & 0xff;
1143 arr[n++] = 0; /* Reserved */
1144 arr[n++] = 0; /* Status code */
1145 arr[n++] = 0; /* Vendor unique */
1146 arr[n++] = 0x1; /* One port per group */
1147 arr[n++] = 0; /* Reserved */
1148 arr[n++] = 0; /* Reserved */
1149 arr[n++] = (port_b >> 8) & 0xff;
1150 arr[n++] = port_b & 0xff;
1151
1152 rlen = n - 4;
1153 arr[0] = (rlen >> 24) & 0xff;
1154 arr[1] = (rlen >> 16) & 0xff;
1155 arr[2] = (rlen >> 8) & 0xff;
1156 arr[3] = rlen & 0xff;
1157
1158 /*
1159 * Return the smallest of:
1160 * - The allocated length
1161 * - The constructed response length
1162 * - The maximum array size
1163 */
1164 rlen = min(alen,n);
1165 ret = fill_from_dev_buffer(scp, arr,
1166 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1167 kfree(arr);
1168 return ret;
1169 }
1170
1171 /* <<Following mode page info copied from ST318451LW>> */
1172
1173 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1174 { /* Read-Write Error Recovery page for mode_sense */
1175 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1176 5, 0, 0xff, 0xff};
1177
1178 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1179 if (1 == pcontrol)
1180 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1181 return sizeof(err_recov_pg);
1182 }
1183
1184 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1185 { /* Disconnect-Reconnect page for mode_sense */
1186 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1187 0, 0, 0, 0, 0, 0, 0, 0};
1188
1189 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1190 if (1 == pcontrol)
1191 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1192 return sizeof(disconnect_pg);
1193 }
1194
1195 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1196 { /* Format device page for mode_sense */
1197 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1198 0, 0, 0, 0, 0, 0, 0, 0,
1199 0, 0, 0, 0, 0x40, 0, 0, 0};
1200
1201 memcpy(p, format_pg, sizeof(format_pg));
1202 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1203 p[11] = sdebug_sectors_per & 0xff;
1204 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1205 p[13] = scsi_debug_sector_size & 0xff;
1206 if (DEV_REMOVEABLE(target))
1207 p[20] |= 0x20; /* should agree with INQUIRY */
1208 if (1 == pcontrol)
1209 memset(p + 2, 0, sizeof(format_pg) - 2);
1210 return sizeof(format_pg);
1211 }
1212
1213 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1214 { /* Caching page for mode_sense */
1215 unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1216 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1217
1218 memcpy(p, caching_pg, sizeof(caching_pg));
1219 if (1 == pcontrol)
1220 memset(p + 2, 0, sizeof(caching_pg) - 2);
1221 return sizeof(caching_pg);
1222 }
1223
1224 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1225 { /* Control mode page for mode_sense */
1226 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1227 0, 0, 0, 0};
1228 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1229 0, 0, 0x2, 0x4b};
1230
1231 if (scsi_debug_dsense)
1232 ctrl_m_pg[2] |= 0x4;
1233 else
1234 ctrl_m_pg[2] &= ~0x4;
1235
1236 if (scsi_debug_ato)
1237 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1238
1239 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1240 if (1 == pcontrol)
1241 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1242 else if (2 == pcontrol)
1243 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1244 return sizeof(ctrl_m_pg);
1245 }
1246
1247
1248 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1249 { /* Informational Exceptions control mode page for mode_sense */
1250 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1251 0, 0, 0x0, 0x0};
1252 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1253 0, 0, 0x0, 0x0};
1254
1255 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1256 if (1 == pcontrol)
1257 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1258 else if (2 == pcontrol)
1259 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1260 return sizeof(iec_m_pg);
1261 }
1262
1263 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1264 { /* SAS SSP mode page - short format for mode_sense */
1265 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1266 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1267
1268 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1269 if (1 == pcontrol)
1270 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1271 return sizeof(sas_sf_m_pg);
1272 }
1273
1274
1275 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1276 int target_dev_id)
1277 { /* SAS phy control and discover mode page for mode_sense */
1278 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1279 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1280 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1281 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1282 0x2, 0, 0, 0, 0, 0, 0, 0,
1283 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1284 0, 0, 0, 0, 0, 0, 0, 0,
1285 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1286 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1287 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1288 0x3, 0, 0, 0, 0, 0, 0, 0,
1289 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1290 0, 0, 0, 0, 0, 0, 0, 0,
1291 };
1292 int port_a, port_b;
1293
1294 port_a = target_dev_id + 1;
1295 port_b = port_a + 1;
1296 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1297 p[20] = (port_a >> 24);
1298 p[21] = (port_a >> 16) & 0xff;
1299 p[22] = (port_a >> 8) & 0xff;
1300 p[23] = port_a & 0xff;
1301 p[48 + 20] = (port_b >> 24);
1302 p[48 + 21] = (port_b >> 16) & 0xff;
1303 p[48 + 22] = (port_b >> 8) & 0xff;
1304 p[48 + 23] = port_b & 0xff;
1305 if (1 == pcontrol)
1306 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1307 return sizeof(sas_pcd_m_pg);
1308 }
1309
1310 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1311 { /* SAS SSP shared protocol specific port mode subpage */
1312 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1313 0, 0, 0, 0, 0, 0, 0, 0,
1314 };
1315
1316 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1317 if (1 == pcontrol)
1318 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1319 return sizeof(sas_sha_m_pg);
1320 }
1321
1322 #define SDEBUG_MAX_MSENSE_SZ 256
1323
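/* MODE SENSE (6 and 10 byte): build the mode parameter header, an optional
 * block descriptor and the requested mode page(s) using the resp_*_pg()
 * helpers above. */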
1324 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1325 struct sdebug_dev_info * devip)
1326 {
1327 unsigned char dbd, llbaa;
1328 int pcontrol, pcode, subpcode, bd_len;
1329 unsigned char dev_spec;
1330 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1331 unsigned char * ap;
1332 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1333 unsigned char *cmd = (unsigned char *)scp->cmnd;
1334
1335 if ((errsts = check_readiness(scp, 1, devip)))
1336 return errsts;
1337 dbd = !!(cmd[1] & 0x8);
1338 pcontrol = (cmd[2] & 0xc0) >> 6;
1339 pcode = cmd[2] & 0x3f;
1340 subpcode = cmd[3];
1341 msense_6 = (MODE_SENSE == cmd[0]);
1342 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1343 if ((0 == scsi_debug_ptype) && (0 == dbd))
1344 bd_len = llbaa ? 16 : 8;
1345 else
1346 bd_len = 0;
1347 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1348 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1349 if (0x3 == pcontrol) { /* Saving values not supported */
1350 mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1351 0);
1352 return check_condition_result;
1353 }
1354 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1355 (devip->target * 1000) - 3;
1356 /* set DPOFUA bit for disks */
1357 if (0 == scsi_debug_ptype)
1358 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1359 else
1360 dev_spec = 0x0;
1361 if (msense_6) {
1362 arr[2] = dev_spec;
1363 arr[3] = bd_len;
1364 offset = 4;
1365 } else {
1366 arr[3] = dev_spec;
1367 if (16 == bd_len)
1368 arr[4] = 0x1; /* set LONGLBA bit */
1369 arr[7] = bd_len; /* assume 255 or less */
1370 offset = 8;
1371 }
1372 ap = arr + offset;
1373 if ((bd_len > 0) && (!sdebug_capacity))
1374 sdebug_capacity = get_sdebug_capacity();
1375
1376 if (8 == bd_len) {
1377 if (sdebug_capacity > 0xfffffffe) {
1378 ap[0] = 0xff;
1379 ap[1] = 0xff;
1380 ap[2] = 0xff;
1381 ap[3] = 0xff;
1382 } else {
1383 ap[0] = (sdebug_capacity >> 24) & 0xff;
1384 ap[1] = (sdebug_capacity >> 16) & 0xff;
1385 ap[2] = (sdebug_capacity >> 8) & 0xff;
1386 ap[3] = sdebug_capacity & 0xff;
1387 }
1388 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1389 ap[7] = scsi_debug_sector_size & 0xff;
1390 offset += bd_len;
1391 ap = arr + offset;
1392 } else if (16 == bd_len) {
1393 unsigned long long capac = sdebug_capacity;
1394
1395 for (k = 0; k < 8; ++k, capac >>= 8)
1396 ap[7 - k] = capac & 0xff;
1397 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1398 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1399 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1400 ap[15] = scsi_debug_sector_size & 0xff;
1401 offset += bd_len;
1402 ap = arr + offset;
1403 }
1404
1405 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1406 /* TODO: Control Extension page */
1407 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1408 0);
1409 return check_condition_result;
1410 }
1411 switch (pcode) {
1412 case 0x1: /* Read-Write error recovery page, direct access */
1413 len = resp_err_recov_pg(ap, pcontrol, target);
1414 offset += len;
1415 break;
1416 case 0x2: /* Disconnect-Reconnect page, all devices */
1417 len = resp_disconnect_pg(ap, pcontrol, target);
1418 offset += len;
1419 break;
1420 case 0x3: /* Format device page, direct access */
1421 len = resp_format_pg(ap, pcontrol, target);
1422 offset += len;
1423 break;
1424 case 0x8: /* Caching page, direct access */
1425 len = resp_caching_pg(ap, pcontrol, target);
1426 offset += len;
1427 break;
1428 case 0xa: /* Control Mode page, all devices */
1429 len = resp_ctrl_m_pg(ap, pcontrol, target);
1430 offset += len;
1431 break;
1432 case 0x19: /* if spc==1 then sas phy, control+discover */
1433 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1434 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1435 INVALID_FIELD_IN_CDB, 0);
1436 return check_condition_result;
1437 }
1438 len = 0;
1439 if ((0x0 == subpcode) || (0xff == subpcode))
1440 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1441 if ((0x1 == subpcode) || (0xff == subpcode))
1442 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1443 target_dev_id);
1444 if ((0x2 == subpcode) || (0xff == subpcode))
1445 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1446 offset += len;
1447 break;
1448 case 0x1c: /* Informational Exceptions Mode page, all devices */
1449 len = resp_iec_m_pg(ap, pcontrol, target);
1450 offset += len;
1451 break;
1452 case 0x3f: /* Read all Mode pages */
1453 if ((0 == subpcode) || (0xff == subpcode)) {
1454 len = resp_err_recov_pg(ap, pcontrol, target);
1455 len += resp_disconnect_pg(ap + len, pcontrol, target);
1456 len += resp_format_pg(ap + len, pcontrol, target);
1457 len += resp_caching_pg(ap + len, pcontrol, target);
1458 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1459 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1460 if (0xff == subpcode) {
1461 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1462 target, target_dev_id);
1463 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1464 }
1465 len += resp_iec_m_pg(ap + len, pcontrol, target);
1466 } else {
1467 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1468 INVALID_FIELD_IN_CDB, 0);
1469 return check_condition_result;
1470 }
1471 offset += len;
1472 break;
1473 default:
1474 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1475 0);
1476 return check_condition_result;
1477 }
1478 if (msense_6)
1479 arr[0] = offset - 1;
1480 else {
1481 arr[0] = ((offset - 2) >> 8) & 0xff;
1482 arr[1] = (offset - 2) & 0xff;
1483 }
1484 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1485 }
1486
1487 #define SDEBUG_MAX_MSELECT_SZ 512
1488
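/* MODE SELECT: only the Control and Informational Exceptions mode pages can
 * be modified; other pages are rejected with an ILLEGAL REQUEST. */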
1489 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1490 struct sdebug_dev_info * devip)
1491 {
1492 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1493 int param_len, res, errsts, mpage;
1494 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1495 unsigned char *cmd = (unsigned char *)scp->cmnd;
1496
1497 if ((errsts = check_readiness(scp, 1, devip)))
1498 return errsts;
1499 memset(arr, 0, sizeof(arr));
1500 pf = cmd[1] & 0x10;
1501 sp = cmd[1] & 0x1;
1502 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1503 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1504 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1505 INVALID_FIELD_IN_CDB, 0);
1506 return check_condition_result;
1507 }
1508 res = fetch_to_dev_buffer(scp, arr, param_len);
1509 if (-1 == res)
1510 return (DID_ERROR << 16);
1511 else if ((res < param_len) &&
1512 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1513 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1514 " IO sent=%d bytes\n", param_len, res);
1515 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1516 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1517 if (md_len > 2) {
1518 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1519 INVALID_FIELD_IN_PARAM_LIST, 0);
1520 return check_condition_result;
1521 }
1522 off = bd_len + (mselect6 ? 4 : 8);
1523 mpage = arr[off] & 0x3f;
1524 ps = !!(arr[off] & 0x80);
1525 if (ps) {
1526 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1527 INVALID_FIELD_IN_PARAM_LIST, 0);
1528 return check_condition_result;
1529 }
1530 spf = !!(arr[off] & 0x40);
1531 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1532 (arr[off + 1] + 2);
1533 if ((pg_len + off) > param_len) {
1534 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1535 PARAMETER_LIST_LENGTH_ERR, 0);
1536 return check_condition_result;
1537 }
1538 switch (mpage) {
1539 case 0xa: /* Control Mode page */
1540 if (ctrl_m_pg[1] == arr[off + 1]) {
1541 memcpy(ctrl_m_pg + 2, arr + off + 2,
1542 sizeof(ctrl_m_pg) - 2);
1543 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1544 return 0;
1545 }
1546 break;
1547 case 0x1c: /* Informational Exceptions Mode page */
1548 if (iec_m_pg[1] == arr[off + 1]) {
1549 memcpy(iec_m_pg + 2, arr + off + 2,
1550 sizeof(iec_m_pg) - 2);
1551 return 0;
1552 }
1553 break;
1554 default:
1555 break;
1556 }
1557 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1558 INVALID_FIELD_IN_PARAM_LIST, 0);
1559 return check_condition_result;
1560 }
1561
1562 static int resp_temp_l_pg(unsigned char * arr)
1563 {
1564 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1565 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1566 };
1567
1568 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1569 return sizeof(temp_l_pg);
1570 }
1571
1572 static int resp_ie_l_pg(unsigned char * arr)
1573 {
1574 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1575 };
1576
1577 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1578 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1579 arr[4] = THRESHOLD_EXCEEDED;
1580 arr[5] = 0xff;
1581 }
1582 return sizeof(ie_l_pg);
1583 }
1584
1585 #define SDEBUG_MAX_LSENSE_SZ 512
1586
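/* LOG SENSE: supports the supported-pages, temperature and informational
 * exceptions log pages, plus their 0xff subpage listings. */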
1587 static int resp_log_sense(struct scsi_cmnd * scp,
1588 struct sdebug_dev_info * devip)
1589 {
1590 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1591 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1592 unsigned char *cmd = (unsigned char *)scp->cmnd;
1593
1594 if ((errsts = check_readiness(scp, 1, devip)))
1595 return errsts;
1596 memset(arr, 0, sizeof(arr));
1597 ppc = cmd[1] & 0x2;
1598 sp = cmd[1] & 0x1;
1599 if (ppc || sp) {
1600 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1601 INVALID_FIELD_IN_CDB, 0);
1602 return check_condition_result;
1603 }
1604 pcontrol = (cmd[2] & 0xc0) >> 6;
1605 pcode = cmd[2] & 0x3f;
1606 subpcode = cmd[3] & 0xff;
1607 alloc_len = (cmd[7] << 8) + cmd[8];
1608 arr[0] = pcode;
1609 if (0 == subpcode) {
1610 switch (pcode) {
1611 case 0x0: /* Supported log pages log page */
1612 n = 4;
1613 arr[n++] = 0x0; /* this page */
1614 arr[n++] = 0xd; /* Temperature */
1615 arr[n++] = 0x2f; /* Informational exceptions */
1616 arr[3] = n - 4;
1617 break;
1618 case 0xd: /* Temperature log page */
1619 arr[3] = resp_temp_l_pg(arr + 4);
1620 break;
1621 case 0x2f: /* Informational exceptions log page */
1622 arr[3] = resp_ie_l_pg(arr + 4);
1623 break;
1624 default:
1625 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1626 INVALID_FIELD_IN_CDB, 0);
1627 return check_condition_result;
1628 }
1629 } else if (0xff == subpcode) {
1630 arr[0] |= 0x40;
1631 arr[1] = subpcode;
1632 switch (pcode) {
1633 case 0x0: /* Supported log pages and subpages log page */
1634 n = 4;
1635 arr[n++] = 0x0;
1636 arr[n++] = 0x0; /* 0,0 page */
1637 arr[n++] = 0x0;
1638 arr[n++] = 0xff; /* this page */
1639 arr[n++] = 0xd;
1640 arr[n++] = 0x0; /* Temperature */
1641 arr[n++] = 0x2f;
1642 arr[n++] = 0x0; /* Informational exceptions */
1643 arr[3] = n - 4;
1644 break;
1645 case 0xd: /* Temperature subpages */
1646 n = 4;
1647 arr[n++] = 0xd;
1648 arr[n++] = 0x0; /* Temperature */
1649 arr[3] = n - 4;
1650 break;
1651 case 0x2f: /* Informational exceptions subpages */
1652 n = 4;
1653 arr[n++] = 0x2f;
1654 arr[n++] = 0x0; /* Informational exceptions */
1655 arr[3] = n - 4;
1656 break;
1657 default:
1658 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1659 INVALID_FIELD_IN_CDB, 0);
1660 return check_condition_result;
1661 }
1662 } else {
1663 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1664 INVALID_FIELD_IN_CDB, 0);
1665 return check_condition_result;
1666 }
1667 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1668 return fill_from_dev_buffer(scp, arr,
1669 min(len, SDEBUG_MAX_LSENSE_SZ));
1670 }
1671
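/* Common LBA range/length validation: addresses beyond the capacity or
 * transfer lengths larger than the backing store yield a CHECK CONDITION. */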
1672 static int check_device_access_params(struct sdebug_dev_info *devi,
1673 unsigned long long lba, unsigned int num)
1674 {
1675 if (lba + num > sdebug_capacity) {
1676 mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1677 return check_condition_result;
1678 }
1679 /* transfer length excessive (tie in to block limits VPD page) */
1680 if (num > sdebug_store_sectors) {
1681 mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1682 return check_condition_result;
1683 }
1684 return 0;
1685 }
1686
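/* Copy data between the command's buffer and the fake RAM store (read or
 * write), wrapping at the end of the store when the virtual capacity is
 * larger than the store. */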
1687 static int do_device_access(struct scsi_cmnd *scmd,
1688 struct sdebug_dev_info *devi,
1689 unsigned long long lba, unsigned int num, int write)
1690 {
1691 int ret;
1692 unsigned long long block, rest = 0;
1693 int (*func)(struct scsi_cmnd *, unsigned char *, int);
1694
1695 func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
1696
1697 block = do_div(lba, sdebug_store_sectors);
1698 if (block + num > sdebug_store_sectors)
1699 rest = block + num - sdebug_store_sectors;
1700
1701 ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
1702 (num - rest) * scsi_debug_sector_size);
1703 if (!ret && rest)
1704 ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
1705
1706 return ret;
1707 }
1708
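/* Verify the protection information held in dif_storep against the data in
 * fake_storep for each sector read: the guard tag (CRC or IP checksum,
 * selected by the guard parameter) and, for DIF types 1 and 2, the
 * reference tag. Sectors whose application tag is 0xffff are skipped.
 * On success the tuples are copied out to the command's protection
 * scatter-gather list.
 */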
1709 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1710 unsigned int sectors, u32 ei_lba)
1711 {
1712 unsigned int i, resid;
1713 struct scatterlist *psgl;
1714 struct sd_dif_tuple *sdt;
1715 sector_t sector;
1716 sector_t tmp_sec = start_sec;
1717 void *paddr;
1718
1719 start_sec = do_div(tmp_sec, sdebug_store_sectors);
1720
1721 sdt = (struct sd_dif_tuple *)(dif_storep + dif_offset(start_sec));
1722
1723 for (i = 0 ; i < sectors ; i++) {
1724 u16 csum;
1725
1726 if (sdt[i].app_tag == 0xffff)
1727 continue;
1728
1729 sector = start_sec + i;
1730
1731 switch (scsi_debug_guard) {
1732 case 1:
1733 csum = ip_compute_csum(fake_storep +
1734 sector * scsi_debug_sector_size,
1735 scsi_debug_sector_size);
1736 break;
1737 case 0:
1738 csum = crc_t10dif(fake_storep +
1739 sector * scsi_debug_sector_size,
1740 scsi_debug_sector_size);
1741 csum = cpu_to_be16(csum);
1742 break;
1743 default:
1744 BUG();
1745 }
1746
1747 if (sdt[i].guard_tag != csum) {
1748 printk(KERN_ERR "%s: GUARD check failed on sector %lu" \
1749 " rcvd 0x%04x, data 0x%04x\n", __func__,
1750 (unsigned long)sector,
1751 be16_to_cpu(sdt[i].guard_tag),
1752 be16_to_cpu(csum));
1753 dif_errors++;
1754 return 0x01;
1755 }
1756
1757 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1758 be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) {
1759 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1760 __func__, (unsigned long)sector);
1761 dif_errors++;
1762 return 0x03;
1763 }
1764
1765 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1766 be32_to_cpu(sdt[i].ref_tag) != ei_lba) {
1767 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1768 __func__, (unsigned long)sector);
1769 dif_errors++;
1770 return 0x03;
1771 }
1772
1773 ei_lba++;
1774 }
1775
1776 resid = sectors * 8; /* Bytes of protection data to copy into sgl */
1777 sector = start_sec;
1778
1779 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1780 int len = min(psgl->length, resid);
1781
1782 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1783 memcpy(paddr, dif_storep + dif_offset(sector), len);
1784
1785 sector += len >> 3;
1786 if (sector >= sdebug_store_sectors) {
1787 /* Force wrap */
1788 tmp_sec = sector;
1789 sector = do_div(tmp_sec, sdebug_store_sectors);
1790 }
1791 resid -= len;
1792 kunmap_atomic(paddr);
1793 }
1794
1795 dix_reads++;
1796
1797 return 0;
1798 }
1799
1800 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1801 unsigned int num, struct sdebug_dev_info *devip,
1802 u32 ei_lba)
1803 {
1804 unsigned long iflags;
1805 int ret;
1806
1807 ret = check_device_access_params(devip, lba, num);
1808 if (ret)
1809 return ret;
1810
1811 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1812 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
1813 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1814 /* claim unrecoverable read error */
1815 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1816 /* set info field and valid bit for fixed format sense data */
1817 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1818 devip->sense_buff[0] |= 0x80; /* Valid bit */
1819 ret = (lba < OPT_MEDIUM_ERR_ADDR)
1820 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
1821 devip->sense_buff[3] = (ret >> 24) & 0xff;
1822 devip->sense_buff[4] = (ret >> 16) & 0xff;
1823 devip->sense_buff[5] = (ret >> 8) & 0xff;
1824 devip->sense_buff[6] = ret & 0xff;
1825 }
1826 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1827 return check_condition_result;
1828 }
1829
1830 /* DIX + T10 DIF */
1831 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1832 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1833
1834 if (prot_ret) {
1835 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1836 return illegal_condition_result;
1837 }
1838 }
1839
1840 read_lock_irqsave(&atomic_rw, iflags);
1841 ret = do_device_access(SCpnt, devip, lba, num, 0);
1842 read_unlock_irqrestore(&atomic_rw, iflags);
1843 return ret;
1844 }
1845
1846 static void dump_sector(unsigned char *buf, int len)
1847 {
1848 int i, j;
1849
1850 printk(KERN_ERR ">>> Sector Dump <<<\n");
1851
1852 for (i = 0 ; i < len ; i += 16) {
1853 printk(KERN_ERR "%04d: ", i);
1854
1855 for (j = 0 ; j < 16 ; j++) {
1856 unsigned char c = buf[i+j];
1857 if (c >= 0x20 && c < 0x7e)
1858 printk(" %c ", buf[i+j]);
1859 else
1860 printk("%02x ", buf[i+j]);
1861 }
1862
1863 printk("\n");
1864 }
1865 }
1866
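/* Verify the protection tuples that accompany a write before committing
 * them to dif_storep: each sector's guard tag is recomputed from the data
 * and the reference tag is checked for DIF types 1 and 2.
 */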
1867 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1868 unsigned int sectors, u32 ei_lba)
1869 {
1870 int i, j, ret;
1871 struct sd_dif_tuple *sdt;
1872 struct scatterlist *dsgl = scsi_sglist(SCpnt);
1873 struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1874 void *daddr, *paddr;
1875 sector_t tmp_sec = start_sec;
1876 sector_t sector;
1877 int ppage_offset;
1878 unsigned short csum;
1879
1880 sector = do_div(tmp_sec, sdebug_store_sectors);
1881
1882 BUG_ON(scsi_sg_count(SCpnt) == 0);
1883 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1884
1885 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1886 ppage_offset = 0;
1887
1888 /* For each data page */
1889 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1890 daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
1891
1892 /* For each sector-sized chunk in data page */
1893 for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
1894
1895 /* If we're at the end of the current
1896 * protection page advance to the next one
1897 */
1898 if (ppage_offset >= psgl->length) {
1899 kunmap_atomic(paddr);
1900 psgl = sg_next(psgl);
1901 BUG_ON(psgl == NULL);
1902 paddr = kmap_atomic(sg_page(psgl))
1903 + psgl->offset;
1904 ppage_offset = 0;
1905 }
1906
1907 sdt = paddr + ppage_offset;
1908
1909 switch (scsi_debug_guard) {
1910 case 1:
1911 csum = ip_compute_csum(daddr,
1912 scsi_debug_sector_size);
1913 break;
1914 case 0:
1915 csum = cpu_to_be16(crc_t10dif(daddr,
1916 scsi_debug_sector_size));
1917 break;
1918 default:
1919 BUG();
1920 ret = 0;
1921 goto out;
1922 }
1923
1924 if (sdt->guard_tag != csum) {
1925 printk(KERN_ERR
1926 "%s: GUARD check failed on sector %lu " \
1927 "rcvd 0x%04x, calculated 0x%04x\n",
1928 __func__, (unsigned long)sector,
1929 be16_to_cpu(sdt->guard_tag),
1930 be16_to_cpu(csum));
1931 ret = 0x01;
1932 dump_sector(daddr, scsi_debug_sector_size);
1933 goto out;
1934 }
1935
1936 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1937 be32_to_cpu(sdt->ref_tag)
1938 != (start_sec & 0xffffffff)) {
1939 printk(KERN_ERR
1940 "%s: REF check failed on sector %lu\n",
1941 __func__, (unsigned long)sector);
1942 ret = 0x03;
1943 dump_sector(daddr, scsi_debug_sector_size);
1944 goto out;
1945 }
1946
1947 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1948 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1949 printk(KERN_ERR
1950 "%s: REF check failed on sector %lu\n",
1951 __func__, (unsigned long)sector);
1952 ret = 0x03;
1953 dump_sector(daddr, scsi_debug_sector_size);
1954 goto out;
1955 }
1956
1957 /* Would be great to copy this in bigger
1958 * chunks. However, for the sake of
1959 * correctness we need to verify each sector
1960 * before writing it to "stable" storage
1961 */
1962 memcpy(dif_storep + dif_offset(sector), sdt, 8);
1963
1964 sector++;
1965
1966 if (sector == sdebug_store_sectors)
1967 sector = 0; /* Force wrap */
1968
1969 start_sec++;
1970 ei_lba++;
1971 daddr += scsi_debug_sector_size;
1972 ppage_offset += sizeof(struct sd_dif_tuple);
1973 }
1974
1975 kunmap_atomic(daddr);
1976 }
1977
1978 kunmap_atomic(paddr);
1979
1980 dix_writes++;
1981
1982 return 0;
1983
1984 out:
1985 dif_errors++;
1986 kunmap_atomic(daddr);
1987 kunmap_atomic(paddr);
1988 return ret;
1989 }
1990
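/* Logical block provisioning: map_storep holds one bit per unmap granule.
 * map_state() reports whether the granule containing 'lba' is mapped and
 * returns, via *num, the number of following blocks that share that state.
 */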
1991 static unsigned int map_state(sector_t lba, unsigned int *num)
1992 {
1993 unsigned int granularity, alignment, mapped;
1994 sector_t block, next, end;
1995
1996 granularity = scsi_debug_unmap_granularity;
1997 alignment = granularity - scsi_debug_unmap_alignment;
1998 block = lba + alignment;
1999 do_div(block, granularity);
2000
2001 mapped = test_bit(block, map_storep);
2002
2003 if (mapped)
2004 next = find_next_zero_bit(map_storep, map_size, block);
2005 else
2006 next = find_next_bit(map_storep, map_size, block);
2007
2008 end = next * granularity - scsi_debug_unmap_alignment;
2009 *num = end - lba;
2010
2011 return mapped;
2012 }
2013
2014 static void map_region(sector_t lba, unsigned int len)
2015 {
2016 unsigned int granularity, alignment;
2017 sector_t end = lba + len;
2018
2019 granularity = scsi_debug_unmap_granularity;
2020 alignment = granularity - scsi_debug_unmap_alignment;
2021
2022 while (lba < end) {
2023 sector_t block, rem;
2024
2025 block = lba + alignment;
2026 rem = do_div(block, granularity);
2027
2028 if (block < map_size)
2029 set_bit(block, map_storep);
2030
2031 lba += granularity - rem;
2032 }
2033 }
2034
2035 static void unmap_region(sector_t lba, unsigned int len)
2036 {
2037 unsigned int granularity, alignment;
2038 sector_t end = lba + len;
2039
2040 granularity = scsi_debug_unmap_granularity;
2041 alignment = granularity - scsi_debug_unmap_alignment;
2042
2043 while (lba < end) {
2044 sector_t block, rem;
2045
2046 block = lba + alignment;
2047 rem = do_div(block, granularity);
2048
2049 if (rem == 0 && lba + granularity <= end &&
2050 block < map_size)
2051 clear_bit(block, map_storep);
2052
2053 lba += granularity - rem;
2054 }
2055 }
2056
2057 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2058 unsigned int num, struct sdebug_dev_info *devip,
2059 u32 ei_lba)
2060 {
2061 unsigned long iflags;
2062 int ret;
2063
2064 ret = check_device_access_params(devip, lba, num);
2065 if (ret)
2066 return ret;
2067
2068 /* DIX + T10 DIF */
2069 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2070 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2071
2072 if (prot_ret) {
2073 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2074 return illegal_condition_result;
2075 }
2076 }
2077
2078 write_lock_irqsave(&atomic_rw, iflags);
2079 ret = do_device_access(SCpnt, devip, lba, num, 1);
2080 if (scsi_debug_unmap_granularity)
2081 map_region(lba, num);
2082 write_unlock_irqrestore(&atomic_rw, iflags);
2083 if (-1 == ret)
2084 return (DID_ERROR << 16);
2085 else if ((ret < (num * scsi_debug_sector_size)) &&
2086 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2087 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2088 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2089
2090 return 0;
2091 }
2092
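/* WRITE SAME: if the UNMAP bit is set and provisioning is enabled the range
 * is simply deallocated; otherwise one logical block is fetched from the
 * initiator and copied to every remaining block in the range.
 */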
2093 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2094 unsigned int num, struct sdebug_dev_info *devip,
2095 u32 ei_lba, unsigned int unmap)
2096 {
2097 unsigned long iflags;
2098 unsigned long long i;
2099 int ret;
2100
2101 ret = check_device_access_params(devip, lba, num);
2102 if (ret)
2103 return ret;
2104
2105 if (num > scsi_debug_write_same_length) {
2106 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2107 0);
2108 return check_condition_result;
2109 }
2110
2111 write_lock_irqsave(&atomic_rw, iflags);
2112
2113 if (unmap && scsi_debug_unmap_granularity) {
2114 unmap_region(lba, num);
2115 goto out;
2116 }
2117
2118 /* Else fetch one logical block */
2119 ret = fetch_to_dev_buffer(scmd,
2120 fake_storep + (lba * scsi_debug_sector_size),
2121 scsi_debug_sector_size);
2122
2123 if (-1 == ret) {
2124 write_unlock_irqrestore(&atomic_rw, iflags);
2125 return (DID_ERROR << 16);
2126 } else if ((ret < (num * scsi_debug_sector_size)) &&
2127 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2128 printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2129 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2130
2131 /* Copy first sector to remaining blocks */
2132 for (i = 1 ; i < num ; i++)
2133 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2134 fake_storep + (lba * scsi_debug_sector_size),
2135 scsi_debug_sector_size);
2136
2137 if (scsi_debug_unmap_granularity)
2138 map_region(lba, num);
2139 out:
2140 write_unlock_irqrestore(&atomic_rw, iflags);
2141
2142 return 0;
2143 }
2144
2145 struct unmap_block_desc {
2146 __be64 lba;
2147 __be32 blocks;
2148 __be32 __reserved;
2149 };
2150
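/* UNMAP: the parameter list is an 8 byte header followed by 16 byte block
 * descriptors (struct unmap_block_desc above), each giving a starting LBA
 * and a number of blocks to deallocate.
 */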
2151 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2152 {
2153 unsigned char *buf;
2154 struct unmap_block_desc *desc;
2155 unsigned int i, payload_len, descriptors;
2156 int ret;
2157
2158 ret = check_readiness(scmd, 1, devip);
2159 if (ret)
2160 return ret;
2161
2162 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2163 BUG_ON(scsi_bufflen(scmd) != payload_len);
2164
2165 descriptors = (payload_len - 8) / 16;
2166
2167 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2168 if (!buf)
2169 return check_condition_result;
2170
2171 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2172
2173 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2174 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2175
2176 desc = (void *)&buf[8];
2177
2178 for (i = 0 ; i < descriptors ; i++) {
2179 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2180 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2181
2182 ret = check_device_access_params(devip, lba, num);
2183 if (ret)
2184 goto out;
2185
2186 unmap_region(lba, num);
2187 }
2188
2189 ret = 0;
2190
2191 out:
2192 kfree(buf);
2193
2194 return ret;
2195 }
2196
2197 #define SDEBUG_GET_LBA_STATUS_LEN 32
2198
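/* GET LBA STATUS: returns a single descriptor covering the run of blocks,
 * starting at the requested LBA, that share the same mapped or unmapped
 * state as reported by map_state().
 */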
2199 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2200 struct sdebug_dev_info * devip)
2201 {
2202 unsigned long long lba;
2203 unsigned int alloc_len, mapped, num;
2204 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2205 int ret;
2206
2207 ret = check_readiness(scmd, 1, devip);
2208 if (ret)
2209 return ret;
2210
2211 lba = get_unaligned_be64(&scmd->cmnd[2]);
2212 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2213
2214 if (alloc_len < 24)
2215 return 0;
2216
2217 ret = check_device_access_params(devip, lba, 1);
2218 if (ret)
2219 return ret;
2220
2221 mapped = map_state(lba, &num);
2222
2223 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2224 put_unaligned_be32(20, &arr[0]); /* Parameter Data Length */
2225 put_unaligned_be64(lba, &arr[8]); /* LBA */
2226 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2227 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2228
2229 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2230 }
2231
2232 #define SDEBUG_RLUN_ARR_SZ 256
2233
2234 static int resp_report_luns(struct scsi_cmnd * scp,
2235 struct sdebug_dev_info * devip)
2236 {
2237 unsigned int alloc_len;
2238 int lun_cnt, i, upper, num, n, wlun, lun;
2239 unsigned char *cmd = (unsigned char *)scp->cmnd;
2240 int select_report = (int)cmd[2];
2241 struct scsi_lun *one_lun;
2242 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2243 unsigned char * max_addr;
2244
2245 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2246 if ((alloc_len < 4) || (select_report > 2)) {
2247 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2248 0);
2249 return check_condition_result;
2250 }
2251 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2252 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2253 lun_cnt = scsi_debug_max_luns;
2254 if (1 == select_report)
2255 lun_cnt = 0;
2256 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2257 --lun_cnt;
2258 wlun = (select_report > 0) ? 1 : 0;
2259 num = lun_cnt + wlun;
2260 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2261 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2262 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2263 sizeof(struct scsi_lun)), num);
2264 if (n < num) {
2265 wlun = 0;
2266 lun_cnt = n;
2267 }
2268 one_lun = (struct scsi_lun *) &arr[8];
2269 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2270 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2271 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2272 i++, lun++) {
2273 upper = (lun >> 8) & 0x3f;
2274 if (upper)
2275 one_lun[i].scsi_lun[0] =
2276 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2277 one_lun[i].scsi_lun[1] = lun & 0xff;
2278 }
2279 if (wlun) {
2280 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2281 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2282 i++;
2283 }
2284 alloc_len = (unsigned char *)(one_lun + i) - arr;
2285 return fill_from_dev_buffer(scp, arr,
2286 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
2287 }
2288
2289 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2290 unsigned int num, struct sdebug_dev_info *devip)
2291 {
2292 int i, j, ret = -1;
2293 unsigned char *kaddr, *buf;
2294 unsigned int offset;
2295 struct scatterlist *sg;
2296 struct scsi_data_buffer *sdb = scsi_in(scp);
2297
2298 /* it would be better to avoid this temporary buffer */
2299 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2300 if (!buf)
2301 return ret;
2302
2303 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2304
2305 offset = 0;
2306 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2307 kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
2308 if (!kaddr)
2309 goto out;
2310
2311 for (j = 0; j < sg->length; j++)
2312 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
2313
2314 offset += sg->length;
2315 kunmap_atomic(kaddr);
2316 }
2317 ret = 0;
2318 out:
2319 kfree(buf);
2320
2321 return ret;
2322 }
2323
2324 /* Called when a queued command's response delay timer expires. */
2325 static void timer_intr_handler(unsigned long indx)
2326 {
2327 struct sdebug_queued_cmd * sqcp;
2328 unsigned long iflags;
2329
2330 if (indx >= scsi_debug_max_queue) {
2331 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2332 "large\n");
2333 return;
2334 }
2335 spin_lock_irqsave(&queued_arr_lock, iflags);
2336 sqcp = &queued_arr[(int)indx];
2337 if (! sqcp->in_use) {
2338 printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2339 "interrupt\n");
2340 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2341 return;
2342 }
2343 sqcp->in_use = 0;
2344 if (sqcp->done_funct) {
2345 sqcp->a_cmnd->result = sqcp->scsi_result;
2346 sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2347 }
2348 sqcp->done_funct = NULL;
2349 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2350 }
2351
2352
2353 static struct sdebug_dev_info *
2354 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2355 {
2356 struct sdebug_dev_info *devip;
2357
2358 devip = kzalloc(sizeof(*devip), flags);
2359 if (devip) {
2360 devip->sdbg_host = sdbg_host;
2361 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2362 }
2363 return devip;
2364 }
2365
2366 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2367 {
2368 struct sdebug_host_info * sdbg_host;
2369 struct sdebug_dev_info * open_devip = NULL;
2370 struct sdebug_dev_info * devip =
2371 (struct sdebug_dev_info *)sdev->hostdata;
2372
2373 if (devip)
2374 return devip;
2375 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2376 if (!sdbg_host) {
2377 printk(KERN_ERR "Host info NULL\n");
2378 return NULL;
2379 }
2380 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2381 if ((devip->used) && (devip->channel == sdev->channel) &&
2382 (devip->target == sdev->id) &&
2383 (devip->lun == sdev->lun))
2384 return devip;
2385 else {
2386 if ((!devip->used) && (!open_devip))
2387 open_devip = devip;
2388 }
2389 }
2390 if (!open_devip) { /* try and make a new one */
2391 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2392 if (!open_devip) {
2393 printk(KERN_ERR "%s: out of memory at line %d\n",
2394 __func__, __LINE__);
2395 return NULL;
2396 }
2397 }
2398
2399 open_devip->channel = sdev->channel;
2400 open_devip->target = sdev->id;
2401 open_devip->lun = sdev->lun;
2402 open_devip->sdbg_host = sdbg_host;
2403 open_devip->reset = 1;
2404 open_devip->used = 1;
2405 memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2406 if (scsi_debug_dsense)
2407 open_devip->sense_buff[0] = 0x72;
2408 else {
2409 open_devip->sense_buff[0] = 0x70;
2410 open_devip->sense_buff[7] = 0xa;
2411 }
2412 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2413 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2414
2415 return open_devip;
2416 }
2417
2418 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2419 {
2420 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2421 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2422 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2423 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2424 return 0;
2425 }
2426
2427 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2428 {
2429 struct sdebug_dev_info *devip;
2430
2431 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2432 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2433 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2434 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2435 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2436 devip = devInfoReg(sdp);
2437 if (NULL == devip)
2438 return 1; /* no resources, will be marked offline */
2439 sdp->hostdata = devip;
2440 if (sdp->host->cmd_per_lun)
2441 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2442 sdp->host->cmd_per_lun);
2443 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2444 if (scsi_debug_no_uld)
2445 sdp->no_uld_attach = 1;
2446 return 0;
2447 }
2448
2449 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2450 {
2451 struct sdebug_dev_info *devip =
2452 (struct sdebug_dev_info *)sdp->hostdata;
2453
2454 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2455 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2456 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2457 if (devip) {
2458 /* make this slot available for re-use */
2459 devip->used = 0;
2460 sdp->hostdata = NULL;
2461 }
2462 }
2463
2464 /* Returns 1 if 'cmnd' was found and its timer deleted, else returns 0 */
2465 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2466 {
2467 unsigned long iflags;
2468 int k;
2469 struct sdebug_queued_cmd *sqcp;
2470
2471 spin_lock_irqsave(&queued_arr_lock, iflags);
2472 for (k = 0; k < scsi_debug_max_queue; ++k) {
2473 sqcp = &queued_arr[k];
2474 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2475 del_timer_sync(&sqcp->cmnd_timer);
2476 sqcp->in_use = 0;
2477 sqcp->a_cmnd = NULL;
2478 break;
2479 }
2480 }
2481 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2482 return (k < scsi_debug_max_queue) ? 1 : 0;
2483 }
2484
2485 /* Deletes (stops) timers of all queued commands */
2486 static void stop_all_queued(void)
2487 {
2488 unsigned long iflags;
2489 int k;
2490 struct sdebug_queued_cmd *sqcp;
2491
2492 spin_lock_irqsave(&queued_arr_lock, iflags);
2493 for (k = 0; k < scsi_debug_max_queue; ++k) {
2494 sqcp = &queued_arr[k];
2495 if (sqcp->in_use && sqcp->a_cmnd) {
2496 del_timer_sync(&sqcp->cmnd_timer);
2497 sqcp->in_use = 0;
2498 sqcp->a_cmnd = NULL;
2499 }
2500 }
2501 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2502 }
2503
2504 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2505 {
2506 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2507 printk(KERN_INFO "scsi_debug: abort\n");
2508 ++num_aborts;
2509 stop_queued_cmnd(SCpnt);
2510 return SUCCESS;
2511 }
2512
2513 static int scsi_debug_biosparam(struct scsi_device *sdev,
2514 struct block_device * bdev, sector_t capacity, int *info)
2515 {
2516 int res;
2517 unsigned char *buf;
2518
2519 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2520 printk(KERN_INFO "scsi_debug: biosparam\n");
2521 buf = scsi_bios_ptable(bdev);
2522 if (buf) {
2523 res = scsi_partsize(buf, capacity,
2524 &info[2], &info[0], &info[1]);
2525 kfree(buf);
2526 if (! res)
2527 return res;
2528 }
2529 info[0] = sdebug_heads;
2530 info[1] = sdebug_sectors_per;
2531 info[2] = sdebug_cylinders_per;
2532 return 0;
2533 }
2534
2535 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2536 {
2537 struct sdebug_dev_info * devip;
2538
2539 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2540 printk(KERN_INFO "scsi_debug: device_reset\n");
2541 ++num_dev_resets;
2542 if (SCpnt) {
2543 devip = devInfoReg(SCpnt->device);
2544 if (devip)
2545 devip->reset = 1;
2546 }
2547 return SUCCESS;
2548 }
2549
2550 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2551 {
2552 struct sdebug_host_info *sdbg_host;
2553 struct sdebug_dev_info * dev_info;
2554 struct scsi_device * sdp;
2555 struct Scsi_Host * hp;
2556
2557 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2558 printk(KERN_INFO "scsi_debug: bus_reset\n");
2559 ++num_bus_resets;
2560 if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2561 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2562 if (sdbg_host) {
2563 list_for_each_entry(dev_info,
2564 &sdbg_host->dev_info_list,
2565 dev_list)
2566 dev_info->reset = 1;
2567 }
2568 }
2569 return SUCCESS;
2570 }
2571
2572 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2573 {
2574 struct sdebug_host_info * sdbg_host;
2575 struct sdebug_dev_info * dev_info;
2576
2577 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2578 printk(KERN_INFO "scsi_debug: host_reset\n");
2579 ++num_host_resets;
2580 spin_lock(&sdebug_host_list_lock);
2581 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2582 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2583 dev_list)
2584 dev_info->reset = 1;
2585 }
2586 spin_unlock(&sdebug_host_list_lock);
2587 stop_all_queued();
2588 return SUCCESS;
2589 }
2590
2591 /* Initializes timers in queued array */
2592 static void __init init_all_queued(void)
2593 {
2594 unsigned long iflags;
2595 int k;
2596 struct sdebug_queued_cmd * sqcp;
2597
2598 spin_lock_irqsave(&queued_arr_lock, iflags);
2599 for (k = 0; k < scsi_debug_max_queue; ++k) {
2600 sqcp = &queued_arr[k];
2601 init_timer(&sqcp->cmnd_timer);
2602 sqcp->in_use = 0;
2603 sqcp->a_cmnd = NULL;
2604 }
2605 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2606 }
2607
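/* Write an MBR style partition table (0x55 0xaa signature plus up to
 * SDEBUG_MAX_PARTS entries of type 0x83) into the first sector of the
 * ramdisk, with partition starts placed on track or cylinder boundaries.
 */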
2608 static void __init sdebug_build_parts(unsigned char *ramp,
2609 unsigned long store_size)
2610 {
2611 struct partition * pp;
2612 int starts[SDEBUG_MAX_PARTS + 2];
2613 int sectors_per_part, num_sectors, k;
2614 int heads_by_sects, start_sec, end_sec;
2615
2616 /* assume partition table already zeroed */
2617 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2618 return;
2619 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2620 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2621 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2622 "partitions to %d\n", SDEBUG_MAX_PARTS);
2623 }
2624 num_sectors = (int)sdebug_store_sectors;
2625 sectors_per_part = (num_sectors - sdebug_sectors_per)
2626 / scsi_debug_num_parts;
2627 heads_by_sects = sdebug_heads * sdebug_sectors_per;
2628 starts[0] = sdebug_sectors_per;
2629 for (k = 1; k < scsi_debug_num_parts; ++k)
2630 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2631 * heads_by_sects;
2632 starts[scsi_debug_num_parts] = num_sectors;
2633 starts[scsi_debug_num_parts + 1] = 0;
2634
2635 ramp[510] = 0x55; /* magic partition markings */
2636 ramp[511] = 0xAA;
2637 pp = (struct partition *)(ramp + 0x1be);
2638 for (k = 0; starts[k + 1]; ++k, ++pp) {
2639 start_sec = starts[k];
2640 end_sec = starts[k + 1] - 1;
2641 pp->boot_ind = 0;
2642
2643 pp->cyl = start_sec / heads_by_sects;
2644 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2645 / sdebug_sectors_per;
2646 pp->sector = (start_sec % sdebug_sectors_per) + 1;
2647
2648 pp->end_cyl = end_sec / heads_by_sects;
2649 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2650 / sdebug_sectors_per;
2651 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2652
2653 pp->start_sect = start_sec;
2654 pp->nr_sects = end_sec - start_sec + 1;
2655 pp->sys_ind = 0x83; /* plain Linux partition */
2656 }
2657 }
2658
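/* Either complete the command immediately (delta_jiff <= 0) or park it in
 * queued_arr[] with a timer so the mid level done() callback runs after
 * roughly delta_jiff jiffies; returns 1 (busy) when no free queue slot is
 * available.
 */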
2659 static int schedule_resp(struct scsi_cmnd * cmnd,
2660 struct sdebug_dev_info * devip,
2661 done_funct_t done, int scsi_result, int delta_jiff)
2662 {
2663 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2664 if (scsi_result) {
2665 struct scsi_device * sdp = cmnd->device;
2666
2667 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
2668 "non-zero result=0x%x\n", sdp->host->host_no,
2669 sdp->channel, sdp->id, sdp->lun, scsi_result);
2670 }
2671 }
2672 if (cmnd && devip) {
2673 /* simulate autosense by this driver */
2674 if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2675 memcpy(cmnd->sense_buffer, devip->sense_buff,
2676 (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2677 SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2678 }
2679 if (delta_jiff <= 0) {
2680 if (cmnd)
2681 cmnd->result = scsi_result;
2682 if (done)
2683 done(cmnd);
2684 return 0;
2685 } else {
2686 unsigned long iflags;
2687 int k;
2688 struct sdebug_queued_cmd * sqcp = NULL;
2689
2690 spin_lock_irqsave(&queued_arr_lock, iflags);
2691 for (k = 0; k < scsi_debug_max_queue; ++k) {
2692 sqcp = &queued_arr[k];
2693 if (! sqcp->in_use)
2694 break;
2695 }
2696 if (k >= scsi_debug_max_queue) {
2697 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2698 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2699 return 1; /* report busy to mid level */
2700 }
2701 sqcp->in_use = 1;
2702 sqcp->a_cmnd = cmnd;
2703 sqcp->scsi_result = scsi_result;
2704 sqcp->done_funct = done;
2705 sqcp->cmnd_timer.function = timer_intr_handler;
2706 sqcp->cmnd_timer.data = k;
2707 sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2708 add_timer(&sqcp->cmnd_timer);
2709 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2710 if (cmnd)
2711 cmnd->result = 0;
2712 return 0;
2713 }
2714 }
2715 /* Note: The following macros create attribute files in the
2716 /sys/module/scsi_debug/parameters directory. Unfortunately this
2717 driver is not notified when one of those files is written, so it
2718 cannot trigger auxiliary actions as it can when the corresponding
2719 attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2720 */
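/* For example, a writable parameter can still be inspected or changed with
 * something like:
 *   cat /sys/module/scsi_debug/parameters/opts
 * but, as noted above, writing it this way will not trigger the side
 * effects (such as resetting the command counter) that the driver
 * attribute of the same name provides.
 */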
2721 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2722 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2723 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2724 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2725 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2726 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2727 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2728 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2729 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2730 module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2731 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
2732 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
2733 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
2734 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2735 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2736 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2737 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2738 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2739 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2740 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2741 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2742 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2743 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2744 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2745 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2746 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2747 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2748 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2749 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2750 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2751 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2752 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2753 S_IRUGO | S_IWUSR);
2754 module_param_named(write_same_length, scsi_debug_write_same_length, int,
2755 S_IRUGO | S_IWUSR);
2756
2757 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2758 MODULE_DESCRIPTION("SCSI debug adapter driver");
2759 MODULE_LICENSE("GPL");
2760 MODULE_VERSION(SCSI_DEBUG_VERSION);
2761
2762 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2763 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2764 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2765 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2766 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2767 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2768 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2769 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2770 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2771 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2772 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
2773 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2774 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2775 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2776 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2777 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2778 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2779 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
2780 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2781 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2782 MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
2783 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2784 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2785 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2786 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2787 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2788 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2789 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2790 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
2791 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2792 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2793 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2794 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
2795
2796 static char sdebug_info[256];
2797
2798 static const char * scsi_debug_info(struct Scsi_Host * shp)
2799 {
2800 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2801 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2802 scsi_debug_version_date, scsi_debug_dev_size_mb,
2803 scsi_debug_opts);
2804 return sdebug_info;
2805 }
2806
2807 /* scsi_debug_proc_info
2808 * Used if the driver currently has no own support for /proc/scsi
2809 */
2810 static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
2811 int length, int inout)
2812 {
2813 int len, pos, begin;
2814 int orig_length;
2815
2816 orig_length = length;
2817
2818 if (inout == 1) {
2819 char arr[16];
2820 int minLen = length > 15 ? 15 : length;
2821
2822 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2823 return -EACCES;
2824 memcpy(arr, buffer, minLen);
2825 arr[minLen] = '\0';
2826 if (1 != sscanf(arr, "%d", &pos))
2827 return -EINVAL;
2828 scsi_debug_opts = pos;
2829 if (scsi_debug_every_nth != 0)
2830 scsi_debug_cmnd_count = 0;
2831 return length;
2832 }
2833 begin = 0;
2834 pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
2835 "%s [%s]\n"
2836 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2837 "every_nth=%d(curr:%d)\n"
2838 "delay=%d, max_luns=%d, scsi_level=%d\n"
2839 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2840 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2841 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2842 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2843 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2844 scsi_debug_cmnd_count, scsi_debug_delay,
2845 scsi_debug_max_luns, scsi_debug_scsi_level,
2846 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2847 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2848 num_host_resets, dix_reads, dix_writes, dif_errors);
2849 if (pos < offset) {
2850 len = 0;
2851 begin = pos;
2852 }
2853 *start = buffer + (offset - begin); /* Start of wanted data */
2854 len -= (offset - begin);
2855 if (len > length)
2856 len = length;
2857 return len;
2858 }
2859
2860 static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
2861 {
2862 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2863 }
2864
2865 static ssize_t sdebug_delay_store(struct device_driver * ddp,
2866 const char * buf, size_t count)
2867 {
2868 int delay;
2869 char work[20];
2870
2871 if (1 == sscanf(buf, "%10s", work)) {
2872 if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2873 scsi_debug_delay = delay;
2874 return count;
2875 }
2876 }
2877 return -EINVAL;
2878 }
2879 DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
2880 sdebug_delay_store);
2881
2882 static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
2883 {
2884 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2885 }
2886
2887 static ssize_t sdebug_opts_store(struct device_driver * ddp,
2888 const char * buf, size_t count)
2889 {
2890 int opts;
2891 char work[20];
2892
2893 if (1 == sscanf(buf, "%10s", work)) {
2894 if (0 == strnicmp(work,"0x", 2)) {
2895 if (1 == sscanf(&work[2], "%x", &opts))
2896 goto opts_done;
2897 } else {
2898 if (1 == sscanf(work, "%d", &opts))
2899 goto opts_done;
2900 }
2901 }
2902 return -EINVAL;
2903 opts_done:
2904 scsi_debug_opts = opts;
2905 scsi_debug_cmnd_count = 0;
2906 return count;
2907 }
2908 DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
2909 sdebug_opts_store);
2910
2911 static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
2912 {
2913 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2914 }
2915 static ssize_t sdebug_ptype_store(struct device_driver * ddp,
2916 const char * buf, size_t count)
2917 {
2918 int n;
2919
2920 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2921 scsi_debug_ptype = n;
2922 return count;
2923 }
2924 return -EINVAL;
2925 }
2926 DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);
2927
2928 static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
2929 {
2930 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2931 }
2932 static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2933 const char * buf, size_t count)
2934 {
2935 int n;
2936
2937 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2938 scsi_debug_dsense = n;
2939 return count;
2940 }
2941 return -EINVAL;
2942 }
2943 DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2944 sdebug_dsense_store);
2945
2946 static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2947 {
2948 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2949 }
2950 static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2951 const char * buf, size_t count)
2952 {
2953 int n;
2954
2955 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2956 scsi_debug_fake_rw = n;
2957 return count;
2958 }
2959 return -EINVAL;
2960 }
2961 DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2962 sdebug_fake_rw_store);
2963
2964 static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2965 {
2966 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2967 }
2968 static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2969 const char * buf, size_t count)
2970 {
2971 int n;
2972
2973 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2974 scsi_debug_no_lun_0 = n;
2975 return count;
2976 }
2977 return -EINVAL;
2978 }
2979 DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
2980 sdebug_no_lun_0_store);
2981
2982 static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
2983 {
2984 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
2985 }
2986 static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
2987 const char * buf, size_t count)
2988 {
2989 int n;
2990
2991 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2992 scsi_debug_num_tgts = n;
2993 sdebug_max_tgts_luns();
2994 return count;
2995 }
2996 return -EINVAL;
2997 }
2998 DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
2999 sdebug_num_tgts_store);
3000
3001 static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
3002 {
3003 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3004 }
3005 DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);
3006
3007 static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
3008 {
3009 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3010 }
3011 DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
3012
3013 static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
3014 {
3015 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3016 }
3017 static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
3018 const char * buf, size_t count)
3019 {
3020 int nth;
3021
3022 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3023 scsi_debug_every_nth = nth;
3024 scsi_debug_cmnd_count = 0;
3025 return count;
3026 }
3027 return -EINVAL;
3028 }
3029 DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
3030 sdebug_every_nth_store);
3031
3032 static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
3033 {
3034 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3035 }
3036 static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
3037 const char * buf, size_t count)
3038 {
3039 int n;
3040
3041 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3042 scsi_debug_max_luns = n;
3043 sdebug_max_tgts_luns();
3044 return count;
3045 }
3046 return -EINVAL;
3047 }
3048 DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
3049 sdebug_max_luns_store);
3050
3051 static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
3052 {
3053 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3054 }
3055 static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
3056 const char * buf, size_t count)
3057 {
3058 int n;
3059
3060 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3061 (n <= SCSI_DEBUG_CANQUEUE)) {
3062 scsi_debug_max_queue = n;
3063 return count;
3064 }
3065 return -EINVAL;
3066 }
3067 DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
3068 sdebug_max_queue_store);
3069
3070 static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
3071 {
3072 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3073 }
3074 DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
3075
3076 static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
3077 {
3078 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3079 }
3080 DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
3081
3082 static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
3083 {
3084 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3085 }
3086 static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
3087 const char * buf, size_t count)
3088 {
3089 int n;
3090
3091 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3092 scsi_debug_virtual_gb = n;
3093
3094 sdebug_capacity = get_sdebug_capacity();
3095
3096 return count;
3097 }
3098 return -EINVAL;
3099 }
3100 DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
3101 sdebug_virtual_gb_store);
3102
3103 static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
3104 {
3105 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3106 }
3107
3108 static ssize_t sdebug_add_host_store(struct device_driver * ddp,
3109 const char * buf, size_t count)
3110 {
3111 int delta_hosts;
3112
3113 if (sscanf(buf, "%d", &delta_hosts) != 1)
3114 return -EINVAL;
3115 if (delta_hosts > 0) {
3116 do {
3117 sdebug_add_adapter();
3118 } while (--delta_hosts);
3119 } else if (delta_hosts < 0) {
3120 do {
3121 sdebug_remove_adapter();
3122 } while (++delta_hosts);
3123 }
3124 return count;
3125 }
3126 DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
3127 sdebug_add_host_store);
3128
3129 static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
3130 char * buf)
3131 {
3132 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3133 }
3134 static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
3135 const char * buf, size_t count)
3136 {
3137 int n;
3138
3139 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3140 scsi_debug_vpd_use_hostno = n;
3141 return count;
3142 }
3143 return -EINVAL;
3144 }
3145 DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
3146 sdebug_vpd_use_hostno_store);
3147
3148 static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
3149 {
3150 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3151 }
3152 DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
3153
3154 static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
3155 {
3156 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3157 }
3158 DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);
3159
3160 static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
3161 {
3162 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3163 }
3164 DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
3165
3166 static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
3167 {
3168 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
3169 }
3170 DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
3171
3172 static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
3173 {
3174 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3175 }
3176 DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
3177
3178 static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3179 {
3180 ssize_t count;
3181
3182 if (!scsi_debug_lbp())
3183 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3184 sdebug_store_sectors);
3185
3186 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3187
3188 buf[count++] = '\n';
3189 buf[count++] = 0;
3190
3191 return count;
3192 }
3193 DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
3194
3195
3196 /* Note: The following function creates attribute files in the
3197 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3198 files (over those found in the /sys/module/scsi_debug/parameters
3199 directory) is that auxiliary actions can be triggered when an attribute
3200 is changed. For example see: sdebug_add_host_store() above.
3201 */
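/* For example, writing the add_host attribute:
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 * adds one more simulated host (a negative count removes hosts), a side
 * effect that a write to the corresponding entry under
 * /sys/module/scsi_debug/parameters cannot trigger.
 */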
3202 static int do_create_driverfs_files(void)
3203 {
3204 int ret;
3205
3206 ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3207 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
3208 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3209 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3210 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3211 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3212 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3213 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3214 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3215 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3216 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3217 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3218 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3219 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
3220 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3221 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3222 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3223 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3224 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
3225 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
3226 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
3227 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
3228 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
3229 return ret;
3230 }
3231
3232 static void do_remove_driverfs_files(void)
3233 {
3234 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
3235 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
3236 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
3237 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
3238 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
3239 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3240 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3241 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3242 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3243 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
3244 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3245 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3246 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3247 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3248 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3249 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3250 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3251 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3252 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3253 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3254 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3255 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
3256 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3257 }
3258
3259 struct device *pseudo_primary;
3260
3261 static int __init scsi_debug_init(void)
3262 {
3263 unsigned long sz;
3264 int host_to_add;
3265 int k;
3266 int ret;
3267
3268 switch (scsi_debug_sector_size) {
3269 case 512:
3270 case 1024:
3271 case 2048:
3272 case 4096:
3273 break;
3274 default:
3275 printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3276 scsi_debug_sector_size);
3277 return -EINVAL;
3278 }
3279
3280 switch (scsi_debug_dif) {
3281
3282 case SD_DIF_TYPE0_PROTECTION:
3283 case SD_DIF_TYPE1_PROTECTION:
3284 case SD_DIF_TYPE2_PROTECTION:
3285 case SD_DIF_TYPE3_PROTECTION:
3286 break;
3287
3288 default:
3289 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3290 return -EINVAL;
3291 }
3292
3293 if (scsi_debug_guard > 1) {
3294 printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3295 return -EINVAL;
3296 }
3297
3298 if (scsi_debug_ato > 1) {
3299 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3300 return -EINVAL;
3301 }
3302
3303 if (scsi_debug_physblk_exp > 15) {
3304 printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3305 scsi_debug_physblk_exp);
3306 return -EINVAL;
3307 }
3308
3309 if (scsi_debug_lowest_aligned > 0x3fff) {
3310 printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3311 scsi_debug_lowest_aligned);
3312 return -EINVAL;
3313 }
3314
3315 if (scsi_debug_dev_size_mb < 1)
3316 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
3317 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3318 sdebug_store_sectors = sz / scsi_debug_sector_size;
3319 sdebug_capacity = get_sdebug_capacity();
3320
3321 /* play around with geometry, don't waste too much on track 0 */
3322 sdebug_heads = 8;
3323 sdebug_sectors_per = 32;
3324 if (scsi_debug_dev_size_mb >= 256)
3325 sdebug_heads = 64;
3326 else if (scsi_debug_dev_size_mb >= 16)
3327 sdebug_heads = 32;
3328 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3329 (sdebug_sectors_per * sdebug_heads);
3330 if (sdebug_cylinders_per >= 1024) {
3331 /* other LLDs do this; implies >= 1GB ram disk ... */
3332 sdebug_heads = 255;
3333 sdebug_sectors_per = 63;
3334 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3335 (sdebug_sectors_per * sdebug_heads);
3336 }
3337
3338 fake_storep = vmalloc(sz);
3339 if (NULL == fake_storep) {
3340 printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3341 return -ENOMEM;
3342 }
3343 memset(fake_storep, 0, sz);
3344 if (scsi_debug_num_parts > 0)
3345 sdebug_build_parts(fake_storep, sz);
3346
3347 if (scsi_debug_dif) {
3348 int dif_size;
3349
3350 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3351 dif_storep = vmalloc(dif_size);
3352
3353 printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3354 dif_size, dif_storep);
3355
3356 if (dif_storep == NULL) {
3357 printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3358 ret = -ENOMEM;
3359 goto free_vm;
3360 }
3361
3362 memset(dif_storep, 0xff, dif_size);
3363 }
3364
3365 /* Logical Block Provisioning */
3366 if (scsi_debug_lbp()) {
3367 unsigned int map_bytes;
3368
3369 scsi_debug_unmap_max_blocks =
3370 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3371
3372 scsi_debug_unmap_max_desc =
3373 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3374
3375 scsi_debug_unmap_granularity =
3376 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3377
3378 if (scsi_debug_unmap_alignment &&
3379 scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
3380 printk(KERN_ERR
3381 "%s: ERR: unmap_granularity < unmap_alignment\n",
3382 __func__);
3383 return -EINVAL;
3384 }
3385
3386 map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
3387 map_bytes = BITS_TO_LONGS(map_size) * sizeof(unsigned long);
3388 map_storep = vmalloc(map_bytes);
3389
3390 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3391 map_size);
3392
3393 if (map_storep == NULL) {
3394 printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3395 ret = -ENOMEM;
3396 goto free_vm;
3397 }
3398
3399 memset(map_storep, 0x0, map_bytes);
3400
3401 /* Map first 1KB for partition table */
3402 if (scsi_debug_num_parts)
3403 map_region(0, 2);
3404 }
3405
3406 pseudo_primary = root_device_register("pseudo_0");
3407 if (IS_ERR(pseudo_primary)) {
3408 printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3409 ret = PTR_ERR(pseudo_primary);
3410 goto free_vm;
3411 }
3412 ret = bus_register(&pseudo_lld_bus);
3413 if (ret < 0) {
3414 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3415 ret);
3416 goto dev_unreg;
3417 }
3418 ret = driver_register(&sdebug_driverfs_driver);
3419 if (ret < 0) {
3420 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3421 ret);
3422 goto bus_unreg;
3423 }
3424 ret = do_create_driverfs_files();
3425 if (ret < 0) {
3426 printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
3427 ret);
3428 goto del_files;
3429 }
3430
3431 init_all_queued();
3432
3433 host_to_add = scsi_debug_add_host;
3434 scsi_debug_add_host = 0;
3435
3436 for (k = 0; k < host_to_add; k++) {
3437 if (sdebug_add_adapter()) {
3438 printk(KERN_ERR "scsi_debug_init: "
3439 "sdebug_add_adapter failed k=%d\n", k);
3440 break;
3441 }
3442 }
3443
3444 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3445 printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3446 scsi_debug_add_host);
3447 }
3448 return 0;
3449
3450 del_files:
3451 do_remove_driverfs_files();
3452 driver_unregister(&sdebug_driverfs_driver);
3453 bus_unreg:
3454 bus_unregister(&pseudo_lld_bus);
3455 dev_unreg:
3456 root_device_unregister(pseudo_primary);
3457 free_vm:
3458 if (map_storep)
3459 vfree(map_storep);
3460 if (dif_storep)
3461 vfree(dif_storep);
3462 vfree(fake_storep);
3463
3464 return ret;
3465 }
3466
3467 static void __exit scsi_debug_exit(void)
3468 {
3469 int k = scsi_debug_add_host;
3470
3471 stop_all_queued();
3472 for (; k; k--)
3473 sdebug_remove_adapter();
3474 do_remove_driverfs_files();
3475 driver_unregister(&sdebug_driverfs_driver);
3476 bus_unregister(&pseudo_lld_bus);
3477 root_device_unregister(pseudo_primary);
3478
	vfree(map_storep);	/* the provisioning map is vmalloc()ed too; vfree(NULL) is safe */
	vfree(dif_storep);
	vfree(fake_storep);
3483 }
3484
3485 device_initcall(scsi_debug_init);
3486 module_exit(scsi_debug_exit);
3487
3488 static void sdebug_release_adapter(struct device * dev)
3489 {
3490 struct sdebug_host_info *sdbg_host;
3491
3492 sdbg_host = to_sdebug_host(dev);
3493 kfree(sdbg_host);
3494 }
3495
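/*
 * Create one simulated adapter: allocate its host info, pre-create
 * num_tgts * max_luns device info entries, add it to the global host list
 * and register it on the pseudo bus (which calls sdebug_driver_probe()).
 */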
3496 static int sdebug_add_adapter(void)
3497 {
3498 int k, devs_per_host;
3499 int error = 0;
3500 struct sdebug_host_info *sdbg_host;
3501 struct sdebug_dev_info *sdbg_devinfo, *tmp;
3502
3503 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
3504 if (NULL == sdbg_host) {
3505 printk(KERN_ERR "%s: out of memory at line %d\n",
3506 __func__, __LINE__);
3507 return -ENOMEM;
3508 }
3509
3510 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3511
3512 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3513 for (k = 0; k < devs_per_host; k++) {
3514 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3515 if (!sdbg_devinfo) {
3516 printk(KERN_ERR "%s: out of memory at line %d\n",
3517 __func__, __LINE__);
3518 error = -ENOMEM;
3519 goto clean;
3520 }
3521 }
3522
3523 spin_lock(&sdebug_host_list_lock);
3524 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3525 spin_unlock(&sdebug_host_list_lock);
3526
3527 sdbg_host->dev.bus = &pseudo_lld_bus;
3528 sdbg_host->dev.parent = pseudo_primary;
3529 sdbg_host->dev.release = &sdebug_release_adapter;
3530 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3531
	error = device_register(&sdbg_host->dev);
	if (error) {
		/* undo the list_add_tail() above before freeing sdbg_host */
		spin_lock(&sdebug_host_list_lock);
		list_del(&sdbg_host->host_list);
		spin_unlock(&sdebug_host_list_lock);
		goto clean;
	}
3536
3537 ++scsi_debug_add_host;
3538 return error;
3539
3540 clean:
3541 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3542 dev_list) {
3543 list_del(&sdbg_devinfo->dev_list);
3544 kfree(sdbg_devinfo);
3545 }
3546
3547 kfree(sdbg_host);
3548 return error;
3549 }
3550
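/* Remove the most recently added simulated adapter, if any. */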
3551 static void sdebug_remove_adapter(void)
3552 {
3553 struct sdebug_host_info * sdbg_host = NULL;
3554
3555 spin_lock(&sdebug_host_list_lock);
3556 if (!list_empty(&sdebug_host_list)) {
3557 sdbg_host = list_entry(sdebug_host_list.prev,
3558 struct sdebug_host_info, host_list);
3559 list_del(&sdbg_host->host_list);
3560 }
3561 spin_unlock(&sdebug_host_list_lock);
3562
3563 if (!sdbg_host)
3564 return;
3565
3566 device_unregister(&sdbg_host->dev);
3567 --scsi_debug_add_host;
3568 }
3569
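/*
 * Command dispatcher, run with the host lock held (DEF_SCSI_QCMD below
 * supplies the locking wrapper): decode the CDB, fabricate a response for
 * the addressed simulated LU and complete it via schedule_resp(), either
 * immediately or after the configured delay, optionally injecting errors.
 */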
3570 static
3571 int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
3572 {
3573 unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3574 int len, k;
3575 unsigned int num;
3576 unsigned long long lba;
3577 u32 ei_lba;
3578 int errsts = 0;
3579 int target = SCpnt->device->id;
3580 struct sdebug_dev_info *devip = NULL;
3581 int inj_recovered = 0;
3582 int inj_transport = 0;
3583 int inj_dif = 0;
3584 int inj_dix = 0;
3585 int delay_override = 0;
3586 int unmap = 0;
3587
3588 scsi_set_resid(SCpnt, 0);
3589 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3590 printk(KERN_INFO "scsi_debug: cmd ");
3591 for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3592 printk("%02x ", (int)cmd[k]);
3593 printk("\n");
3594 }
3595
3596 if (target == SCpnt->device->host->hostt->this_id) {
3597 printk(KERN_INFO "scsi_debug: initiator's id used as "
3598 "target!\n");
3599 return schedule_resp(SCpnt, NULL, done,
3600 DID_NO_CONNECT << 16, 0);
3601 }
3602
3603 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3604 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3605 return schedule_resp(SCpnt, NULL, done,
3606 DID_NO_CONNECT << 16, 0);
3607 devip = devInfoReg(SCpnt->device);
3608 if (NULL == devip)
3609 return schedule_resp(SCpnt, NULL, done,
3610 DID_NO_CONNECT << 16, 0);
3611
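	/*
	 * Optional fault injection: every scsi_debug_every_nth commands,
	 * either swallow the command (timeout) or arm one of the error
	 * flags that the read/write handlers below turn into sense data.
	 */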
3612 if ((scsi_debug_every_nth != 0) &&
3613 (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3614 scsi_debug_cmnd_count = 0;
3615 if (scsi_debug_every_nth < -1)
3616 scsi_debug_every_nth = -1;
3617 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3618 return 0; /* ignore command causing timeout */
3619 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
3620 scsi_medium_access_command(SCpnt))
3621 return 0; /* time out reads and writes */
3622 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3623 inj_recovered = 1; /* to reads and writes below */
3624 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3625 inj_transport = 1; /* to reads and writes below */
3626 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3627 inj_dif = 1; /* to reads and writes below */
3628 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3629 inj_dix = 1; /* to reads and writes below */
3630 }
3631
3632 if (devip->wlun) {
3633 switch (*cmd) {
3634 case INQUIRY:
3635 case REQUEST_SENSE:
3636 case TEST_UNIT_READY:
3637 case REPORT_LUNS:
3638 break; /* only allowable wlun commands */
3639 default:
3640 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3641 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3642 "not supported for wlun\n", *cmd);
3643 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3644 INVALID_OPCODE, 0);
3645 errsts = check_condition_result;
3646 return schedule_resp(SCpnt, devip, done, errsts,
3647 0);
3648 }
3649 }
3650
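	/*
	 * Dispatch on the CDB opcode; each handler builds any data-in
	 * payload and/or sense data and leaves a SCSI status in errsts.
	 */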
3651 switch (*cmd) {
3652 case INQUIRY: /* mandatory, ignore unit attention */
3653 delay_override = 1;
3654 errsts = resp_inquiry(SCpnt, target, devip);
3655 break;
3656 case REQUEST_SENSE: /* mandatory, ignore unit attention */
3657 delay_override = 1;
3658 errsts = resp_requests(SCpnt, devip);
3659 break;
3660 case REZERO_UNIT: /* actually this is REWIND for SSC */
3661 case START_STOP:
3662 errsts = resp_start_stop(SCpnt, devip);
3663 break;
3664 case ALLOW_MEDIUM_REMOVAL:
3665 errsts = check_readiness(SCpnt, 1, devip);
3666 if (errsts)
3667 break;
3668 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3669 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3670 cmd[4] ? "inhibited" : "enabled");
3671 break;
3672 case SEND_DIAGNOSTIC: /* mandatory */
3673 errsts = check_readiness(SCpnt, 1, devip);
3674 break;
3675 case TEST_UNIT_READY: /* mandatory */
3676 delay_override = 1;
3677 errsts = check_readiness(SCpnt, 0, devip);
3678 break;
3679 case RESERVE:
3680 errsts = check_readiness(SCpnt, 1, devip);
3681 break;
3682 case RESERVE_10:
3683 errsts = check_readiness(SCpnt, 1, devip);
3684 break;
3685 case RELEASE:
3686 errsts = check_readiness(SCpnt, 1, devip);
3687 break;
3688 case RELEASE_10:
3689 errsts = check_readiness(SCpnt, 1, devip);
3690 break;
3691 case READ_CAPACITY:
3692 errsts = resp_readcap(SCpnt, devip);
3693 break;
3694 case SERVICE_ACTION_IN:
3695 if (cmd[1] == SAI_READ_CAPACITY_16)
3696 errsts = resp_readcap16(SCpnt, devip);
3697 else if (cmd[1] == SAI_GET_LBA_STATUS) {
3698
3699 if (scsi_debug_lbp() == 0) {
3700 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3701 INVALID_COMMAND_OPCODE, 0);
3702 errsts = check_condition_result;
3703 } else
3704 errsts = resp_get_lba_status(SCpnt, devip);
3705 } else {
3706 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3707 INVALID_OPCODE, 0);
3708 errsts = check_condition_result;
3709 }
3710 break;
3711 case MAINTENANCE_IN:
3712 if (MI_REPORT_TARGET_PGS != cmd[1]) {
3713 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3714 INVALID_OPCODE, 0);
3715 errsts = check_condition_result;
3716 break;
3717 }
3718 errsts = resp_report_tgtpgs(SCpnt, devip);
3719 break;
3720 case READ_16:
3721 case READ_12:
3722 case READ_10:
3723 /* READ{10,12,16} and DIF Type 2 are natural enemies */
3724 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3725 cmd[1] & 0xe0) {
3726 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3727 INVALID_COMMAND_OPCODE, 0);
3728 errsts = check_condition_result;
3729 break;
3730 }
3731
3732 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3733 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3734 (cmd[1] & 0xe0) == 0)
3735 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3736
3737 /* fall through */
3738 case READ_6:
3739 read:
3740 errsts = check_readiness(SCpnt, 0, devip);
3741 if (errsts)
3742 break;
3743 if (scsi_debug_fake_rw)
3744 break;
3745 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3746 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3747 if (inj_recovered && (0 == errsts)) {
3748 mk_sense_buffer(devip, RECOVERED_ERROR,
3749 THRESHOLD_EXCEEDED, 0);
3750 errsts = check_condition_result;
3751 } else if (inj_transport && (0 == errsts)) {
3752 mk_sense_buffer(devip, ABORTED_COMMAND,
3753 TRANSPORT_PROBLEM, ACK_NAK_TO);
3754 errsts = check_condition_result;
3755 } else if (inj_dif && (0 == errsts)) {
3756 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3757 errsts = illegal_condition_result;
3758 } else if (inj_dix && (0 == errsts)) {
3759 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3760 errsts = illegal_condition_result;
3761 }
3762 break;
3763 case REPORT_LUNS: /* mandatory, ignore unit attention */
3764 delay_override = 1;
3765 errsts = resp_report_luns(SCpnt, devip);
3766 break;
3767 case VERIFY: /* 10 byte SBC-2 command */
3768 errsts = check_readiness(SCpnt, 0, devip);
3769 break;
3770 case WRITE_16:
3771 case WRITE_12:
3772 case WRITE_10:
3773 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3774 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3775 cmd[1] & 0xe0) {
3776 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3777 INVALID_COMMAND_OPCODE, 0);
3778 errsts = check_condition_result;
3779 break;
3780 }
3781
3782 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3783 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3784 (cmd[1] & 0xe0) == 0)
3785 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3786
3787 /* fall through */
3788 case WRITE_6:
3789 write:
3790 errsts = check_readiness(SCpnt, 0, devip);
3791 if (errsts)
3792 break;
3793 if (scsi_debug_fake_rw)
3794 break;
3795 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3796 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3797 if (inj_recovered && (0 == errsts)) {
3798 mk_sense_buffer(devip, RECOVERED_ERROR,
3799 THRESHOLD_EXCEEDED, 0);
3800 errsts = check_condition_result;
3801 } else if (inj_dif && (0 == errsts)) {
3802 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3803 errsts = illegal_condition_result;
3804 } else if (inj_dix && (0 == errsts)) {
3805 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3806 errsts = illegal_condition_result;
3807 }
3808 break;
3809 case WRITE_SAME_16:
3810 case WRITE_SAME:
3811 if (cmd[1] & 0x8) {
3812 if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
3813 (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
3814 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3815 INVALID_FIELD_IN_CDB, 0);
3816 errsts = check_condition_result;
3817 } else
3818 unmap = 1;
3819 }
3820 if (errsts)
3821 break;
3822 errsts = check_readiness(SCpnt, 0, devip);
3823 if (errsts)
3824 break;
3825 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3826 errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3827 break;
3828 case UNMAP:
3829 errsts = check_readiness(SCpnt, 0, devip);
3830 if (errsts)
3831 break;
3832
3833 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
3834 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3835 INVALID_COMMAND_OPCODE, 0);
3836 errsts = check_condition_result;
3837 } else
3838 errsts = resp_unmap(SCpnt, devip);
3839 break;
3840 case MODE_SENSE:
3841 case MODE_SENSE_10:
3842 errsts = resp_mode_sense(SCpnt, target, devip);
3843 break;
3844 case MODE_SELECT:
3845 errsts = resp_mode_select(SCpnt, 1, devip);
3846 break;
3847 case MODE_SELECT_10:
3848 errsts = resp_mode_select(SCpnt, 0, devip);
3849 break;
3850 case LOG_SENSE:
3851 errsts = resp_log_sense(SCpnt, devip);
3852 break;
3853 case SYNCHRONIZE_CACHE:
3854 delay_override = 1;
3855 errsts = check_readiness(SCpnt, 0, devip);
3856 break;
3857 case WRITE_BUFFER:
3858 errsts = check_readiness(SCpnt, 1, devip);
3859 break;
3860 case XDWRITEREAD_10:
3861 if (!scsi_bidi_cmnd(SCpnt)) {
3862 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3863 INVALID_FIELD_IN_CDB, 0);
3864 errsts = check_condition_result;
3865 break;
3866 }
3867
3868 errsts = check_readiness(SCpnt, 0, devip);
3869 if (errsts)
3870 break;
3871 if (scsi_debug_fake_rw)
3872 break;
3873 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3874 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3875 if (errsts)
3876 break;
3877 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3878 if (errsts)
3879 break;
3880 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3881 break;
3882 case VARIABLE_LENGTH_CMD:
3883 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3884
3885 if ((cmd[10] & 0xe0) == 0)
3886 printk(KERN_ERR
3887 "Unprotected RD/WR to DIF device\n");
3888
3889 if (cmd[9] == READ_32) {
3890 BUG_ON(SCpnt->cmd_len < 32);
3891 goto read;
3892 }
3893
3894 if (cmd[9] == WRITE_32) {
3895 BUG_ON(SCpnt->cmd_len < 32);
3896 goto write;
3897 }
3898 }
3899
3900 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3901 INVALID_FIELD_IN_CDB, 0);
3902 errsts = check_condition_result;
3903 break;
3904
3905 default:
3906 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3907 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3908 "supported\n", *cmd);
3909 errsts = check_readiness(SCpnt, 1, devip);
3910 if (errsts)
3911 break; /* Unit attention takes precedence */
3912 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3913 errsts = check_condition_result;
3914 break;
3915 }
3916 return schedule_resp(SCpnt, devip, done, errsts,
3917 (delay_override ? 0 : scsi_debug_delay));
3918 }
3919
3920 static DEF_SCSI_QCMD(scsi_debug_queuecommand)
3921
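/*
 * Host template handed to the SCSI midlayer; note that can_queue is
 * overridden per host in sdebug_driver_probe() from scsi_debug_max_queue.
 */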
3922 static struct scsi_host_template sdebug_driver_template = {
3923 .proc_info = scsi_debug_proc_info,
3924 .proc_name = sdebug_proc_name,
3925 .name = "SCSI DEBUG",
3926 .info = scsi_debug_info,
3927 .slave_alloc = scsi_debug_slave_alloc,
3928 .slave_configure = scsi_debug_slave_configure,
3929 .slave_destroy = scsi_debug_slave_destroy,
3930 .ioctl = scsi_debug_ioctl,
3931 .queuecommand = scsi_debug_queuecommand,
3932 .eh_abort_handler = scsi_debug_abort,
3933 .eh_bus_reset_handler = scsi_debug_bus_reset,
3934 .eh_device_reset_handler = scsi_debug_device_reset,
3935 .eh_host_reset_handler = scsi_debug_host_reset,
3936 .bios_param = scsi_debug_biosparam,
3937 .can_queue = SCSI_DEBUG_CANQUEUE,
3938 .this_id = 7,
3939 .sg_tablesize = 256,
3940 .cmd_per_lun = 16,
3941 .max_sectors = 0xffff,
3942 .use_clustering = DISABLE_CLUSTERING,
3943 .module = THIS_MODULE,
3944 };
3945
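/*
 * Bus probe routine, called once per registered simulated adapter:
 * allocate a Scsi_Host, advertise the configured DIF/DIX protection
 * capabilities and guard type, then add and scan the host.
 */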
3946 static int sdebug_driver_probe(struct device * dev)
3947 {
3948 int error = 0;
3949 struct sdebug_host_info *sdbg_host;
3950 struct Scsi_Host *hpnt;
3951 int host_prot;
3952
3953 sdbg_host = to_sdebug_host(dev);
3954
3955 sdebug_driver_template.can_queue = scsi_debug_max_queue;
3956 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3957 if (NULL == hpnt) {
		printk(KERN_ERR "%s: scsi_host_alloc failed\n", __func__);
3959 error = -ENODEV;
3960 return error;
3961 }
3962
3963 sdbg_host->shost = hpnt;
3964 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
3965 if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
3966 hpnt->max_id = scsi_debug_num_tgts + 1;
3967 else
3968 hpnt->max_id = scsi_debug_num_tgts;
3969 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
3970
3971 host_prot = 0;
3972
3973 switch (scsi_debug_dif) {
3974
3975 case SD_DIF_TYPE1_PROTECTION:
3976 host_prot = SHOST_DIF_TYPE1_PROTECTION;
3977 if (scsi_debug_dix)
3978 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
3979 break;
3980
3981 case SD_DIF_TYPE2_PROTECTION:
3982 host_prot = SHOST_DIF_TYPE2_PROTECTION;
3983 if (scsi_debug_dix)
3984 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
3985 break;
3986
3987 case SD_DIF_TYPE3_PROTECTION:
3988 host_prot = SHOST_DIF_TYPE3_PROTECTION;
3989 if (scsi_debug_dix)
3990 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
3991 break;
3992
3993 default:
3994 if (scsi_debug_dix)
3995 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
3996 break;
3997 }
3998
3999 scsi_host_set_prot(hpnt, host_prot);
4000
4001 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
4002 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
4003 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
4004 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
4005 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
4006 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
4007 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
4008 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
4009
4010 if (scsi_debug_guard == 1)
4011 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
4012 else
4013 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
4014
4015 error = scsi_add_host(hpnt, &sdbg_host->dev);
4016 if (error) {
4017 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
4018 error = -ENODEV;
4019 scsi_host_put(hpnt);
4020 } else
4021 scsi_scan_host(hpnt);
4022
4023
4024 return error;
4025 }
4026
4027 static int sdebug_driver_remove(struct device * dev)
4028 {
4029 struct sdebug_host_info *sdbg_host;
4030 struct sdebug_dev_info *sdbg_devinfo, *tmp;
4031
4032 sdbg_host = to_sdebug_host(dev);
4033
4034 if (!sdbg_host) {
4035 printk(KERN_ERR "%s: Unable to locate host info\n",
4036 __func__);
4037 return -ENODEV;
4038 }
4039
4040 scsi_remove_host(sdbg_host->shost);
4041
4042 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4043 dev_list) {
4044 list_del(&sdbg_devinfo->dev_list);
4045 kfree(sdbg_devinfo);
4046 }
4047
4048 scsi_host_put(sdbg_host->shost);
4049 return 0;
4050 }
4051
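/* Every device on the pseudo bus is driven by the one scsi_debug driver. */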
4052 static int pseudo_lld_bus_match(struct device *dev,
4053 struct device_driver *dev_driver)
4054 {
4055 return 1;
4056 }
4057
4058 static struct bus_type pseudo_lld_bus = {
4059 .name = "pseudo",
4060 .match = pseudo_lld_bus_match,
4061 .probe = sdebug_driver_probe,
4062 .remove = sdebug_driver_remove,
4063 };