2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disk
10 * (or disk like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
59 #include "scsi_logging.h"
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date
= "20100324";
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
68 #define UNRECOVERED_READ_ERR 0x11
69 #define PARAMETER_LIST_LENGTH_ERR 0x1a
70 #define INVALID_OPCODE 0x20
71 #define ADDR_OUT_OF_RANGE 0x21
72 #define INVALID_COMMAND_OPCODE 0x20
73 #define INVALID_FIELD_IN_CDB 0x24
74 #define INVALID_FIELD_IN_PARAM_LIST 0x26
75 #define POWERON_RESET 0x29
76 #define SAVING_PARAMS_UNSUP 0x39
77 #define TRANSPORT_PROBLEM 0x4b
78 #define THRESHOLD_EXCEEDED 0x5d
79 #define LOW_POWER_COND_ON 0x5e
81 /* Additional Sense Code Qualifier (ASCQ) */
82 #define ACK_NAK_TO 0x3
84 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
86 /* Default values for driver parameters */
87 #define DEF_NUM_HOST 1
88 #define DEF_NUM_TGTS 1
89 #define DEF_MAX_LUNS 1
90 /* With these defaults, this driver will make 1 host with 1 target
91 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
95 #define DEF_DEV_SIZE_MB 8
99 #define DEF_EVERY_NTH 0
100 #define DEF_FAKE_RW 0
104 #define DEF_LBPWS10 0
106 #define DEF_LOWEST_ALIGNED 0
107 #define DEF_NO_LUN_0 0
108 #define DEF_NUM_PARTS 0
110 #define DEF_OPT_BLKS 64
111 #define DEF_PHYSBLK_EXP 0
113 #define DEF_REMOVABLE false
114 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
115 #define DEF_SECTOR_SIZE 512
116 #define DEF_UNMAP_ALIGNMENT 0
117 #define DEF_UNMAP_GRANULARITY 1
118 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
119 #define DEF_UNMAP_MAX_DESC 256
120 #define DEF_VIRTUAL_GB 0
121 #define DEF_VPD_USE_HOSTNO 1
122 #define DEF_WRITESAME_LENGTH 0xFFFF
124 /* bit mask values for scsi_debug_opts */
125 #define SCSI_DEBUG_OPT_NOISE 1
126 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
127 #define SCSI_DEBUG_OPT_TIMEOUT 4
128 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
129 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
130 #define SCSI_DEBUG_OPT_DIF_ERR 32
131 #define SCSI_DEBUG_OPT_DIX_ERR 64
132 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
133 /* When "every_nth" > 0 then modulo "every_nth" commands:
134 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
135 * - a RECOVERED_ERROR is simulated on successful read and write
136 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
137 * - a TRANSPORT_ERROR is simulated on successful read and write
138 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
140 * When "every_nth" < 0 then after "- every_nth" commands:
141 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
142 * - a RECOVERED_ERROR is simulated on successful read and write
143 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
144 * - a TRANSPORT_ERROR is simulated on successful read and write
145 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
146 * This will continue until some other action occurs (e.g. the user
147 * writing a new value (other than -1 or 1) to every_nth via sysfs).
150 /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
151 * sector on read commands: */
152 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
153 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
155 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
156 * or "peripheral device" addressing (value 0) */
157 #define SAM2_LUN_ADDRESS_METHOD 0
158 #define SAM2_WLUN_REPORT_LUNS 0xc101
160 /* Can queue up to this number of commands. Typically commands that
161 * that have a non-zero delay are queued. */
162 #define SCSI_DEBUG_CANQUEUE 255
164 static int scsi_debug_add_host
= DEF_NUM_HOST
;
165 static int scsi_debug_ato
= DEF_ATO
;
166 static int scsi_debug_delay
= DEF_DELAY
;
167 static int scsi_debug_dev_size_mb
= DEF_DEV_SIZE_MB
;
168 static int scsi_debug_dif
= DEF_DIF
;
169 static int scsi_debug_dix
= DEF_DIX
;
170 static int scsi_debug_dsense
= DEF_D_SENSE
;
171 static int scsi_debug_every_nth
= DEF_EVERY_NTH
;
172 static int scsi_debug_fake_rw
= DEF_FAKE_RW
;
173 static unsigned int scsi_debug_guard
= DEF_GUARD
;
174 static int scsi_debug_lowest_aligned
= DEF_LOWEST_ALIGNED
;
175 static int scsi_debug_max_luns
= DEF_MAX_LUNS
;
176 static int scsi_debug_max_queue
= SCSI_DEBUG_CANQUEUE
;
177 static int scsi_debug_no_lun_0
= DEF_NO_LUN_0
;
178 static int scsi_debug_no_uld
= 0;
179 static int scsi_debug_num_parts
= DEF_NUM_PARTS
;
180 static int scsi_debug_num_tgts
= DEF_NUM_TGTS
; /* targets per host */
181 static int scsi_debug_opt_blks
= DEF_OPT_BLKS
;
182 static int scsi_debug_opts
= DEF_OPTS
;
183 static int scsi_debug_physblk_exp
= DEF_PHYSBLK_EXP
;
184 static int scsi_debug_ptype
= DEF_PTYPE
; /* SCSI peripheral type (0==disk) */
185 static int scsi_debug_scsi_level
= DEF_SCSI_LEVEL
;
186 static int scsi_debug_sector_size
= DEF_SECTOR_SIZE
;
187 static int scsi_debug_virtual_gb
= DEF_VIRTUAL_GB
;
188 static int scsi_debug_vpd_use_hostno
= DEF_VPD_USE_HOSTNO
;
189 static unsigned int scsi_debug_lbpu
= DEF_LBPU
;
190 static unsigned int scsi_debug_lbpws
= DEF_LBPWS
;
191 static unsigned int scsi_debug_lbpws10
= DEF_LBPWS10
;
192 static unsigned int scsi_debug_lbprz
= DEF_LBPRZ
;
193 static unsigned int scsi_debug_unmap_alignment
= DEF_UNMAP_ALIGNMENT
;
194 static unsigned int scsi_debug_unmap_granularity
= DEF_UNMAP_GRANULARITY
;
195 static unsigned int scsi_debug_unmap_max_blocks
= DEF_UNMAP_MAX_BLOCKS
;
196 static unsigned int scsi_debug_unmap_max_desc
= DEF_UNMAP_MAX_DESC
;
197 static unsigned int scsi_debug_write_same_length
= DEF_WRITESAME_LENGTH
;
198 static bool scsi_debug_removable
= DEF_REMOVABLE
;
200 static int scsi_debug_cmnd_count
= 0;
202 #define DEV_READONLY(TGT) (0)
204 static unsigned int sdebug_store_sectors
;
205 static sector_t sdebug_capacity
; /* in sectors */
207 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
208 may still need them */
209 static int sdebug_heads
; /* heads per disk */
210 static int sdebug_cylinders_per
; /* cylinders per surface */
211 static int sdebug_sectors_per
; /* sectors per cylinder */
213 #define SDEBUG_MAX_PARTS 4
215 #define SDEBUG_SENSE_LEN 32
217 #define SCSI_DEBUG_MAX_CMD_LEN 32
219 static unsigned int scsi_debug_lbp(void)
221 return scsi_debug_lbpu
| scsi_debug_lbpws
| scsi_debug_lbpws10
;
224 struct sdebug_dev_info
{
225 struct list_head dev_list
;
226 unsigned char sense_buff
[SDEBUG_SENSE_LEN
]; /* weak nexus */
227 unsigned int channel
;
230 struct sdebug_host_info
*sdbg_host
;
237 struct sdebug_host_info
{
238 struct list_head host_list
;
239 struct Scsi_Host
*shost
;
241 struct list_head dev_info_list
;
244 #define to_sdebug_host(d) \
245 container_of(d, struct sdebug_host_info, dev)
247 static LIST_HEAD(sdebug_host_list
);
248 static DEFINE_SPINLOCK(sdebug_host_list_lock
);
250 typedef void (* done_funct_t
) (struct scsi_cmnd
*);
252 struct sdebug_queued_cmd
{
254 struct timer_list cmnd_timer
;
255 done_funct_t done_funct
;
256 struct scsi_cmnd
* a_cmnd
;
259 static struct sdebug_queued_cmd queued_arr
[SCSI_DEBUG_CANQUEUE
];
261 static unsigned char * fake_storep
; /* ramdisk storage */
262 static struct sd_dif_tuple
*dif_storep
; /* protection info */
263 static void *map_storep
; /* provisioning map */
265 static unsigned long map_size
;
266 static int num_aborts
= 0;
267 static int num_dev_resets
= 0;
268 static int num_bus_resets
= 0;
269 static int num_host_resets
= 0;
270 static int dix_writes
;
271 static int dix_reads
;
272 static int dif_errors
;
274 static DEFINE_SPINLOCK(queued_arr_lock
);
275 static DEFINE_RWLOCK(atomic_rw
);
277 static char sdebug_proc_name
[] = "scsi_debug";
279 static struct bus_type pseudo_lld_bus
;
281 static struct device_driver sdebug_driverfs_driver
= {
282 .name
= sdebug_proc_name
,
283 .bus
= &pseudo_lld_bus
,
286 static const int check_condition_result
=
287 (DRIVER_SENSE
<< 24) | SAM_STAT_CHECK_CONDITION
;
289 static const int illegal_condition_result
=
290 (DRIVER_SENSE
<< 24) | (DID_ABORT
<< 16) | SAM_STAT_CHECK_CONDITION
;
292 static unsigned char ctrl_m_pg
[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
294 static unsigned char iec_m_pg
[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
297 static void *fake_store(unsigned long long lba
)
299 lba
= do_div(lba
, sdebug_store_sectors
);
301 return fake_storep
+ lba
* scsi_debug_sector_size
;
304 static struct sd_dif_tuple
*dif_store(sector_t sector
)
306 sector
= do_div(sector
, sdebug_store_sectors
);
308 return dif_storep
+ sector
;
311 static int sdebug_add_adapter(void);
312 static void sdebug_remove_adapter(void);
314 static void sdebug_max_tgts_luns(void)
316 struct sdebug_host_info
*sdbg_host
;
317 struct Scsi_Host
*hpnt
;
319 spin_lock(&sdebug_host_list_lock
);
320 list_for_each_entry(sdbg_host
, &sdebug_host_list
, host_list
) {
321 hpnt
= sdbg_host
->shost
;
322 if ((hpnt
->this_id
>= 0) &&
323 (scsi_debug_num_tgts
> hpnt
->this_id
))
324 hpnt
->max_id
= scsi_debug_num_tgts
+ 1;
326 hpnt
->max_id
= scsi_debug_num_tgts
;
327 /* scsi_debug_max_luns; */
328 hpnt
->max_lun
= SAM2_WLUN_REPORT_LUNS
;
330 spin_unlock(&sdebug_host_list_lock
);
333 static void mk_sense_buffer(struct sdebug_dev_info
*devip
, int key
,
336 unsigned char *sbuff
;
338 sbuff
= devip
->sense_buff
;
339 memset(sbuff
, 0, SDEBUG_SENSE_LEN
);
341 scsi_build_sense_buffer(scsi_debug_dsense
, sbuff
, key
, asc
, asq
);
343 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
344 printk(KERN_INFO
"scsi_debug: [sense_key,asc,ascq]: "
345 "[0x%x,0x%x,0x%x]\n", key
, asc
, asq
);
348 static void get_data_transfer_info(unsigned char *cmd
,
349 unsigned long long *lba
, unsigned int *num
,
355 case VARIABLE_LENGTH_CMD
:
356 *lba
= (u64
)cmd
[19] | (u64
)cmd
[18] << 8 |
357 (u64
)cmd
[17] << 16 | (u64
)cmd
[16] << 24 |
358 (u64
)cmd
[15] << 32 | (u64
)cmd
[14] << 40 |
359 (u64
)cmd
[13] << 48 | (u64
)cmd
[12] << 56;
361 *ei_lba
= (u32
)cmd
[23] | (u32
)cmd
[22] << 8 |
362 (u32
)cmd
[21] << 16 | (u32
)cmd
[20] << 24;
364 *num
= (u32
)cmd
[31] | (u32
)cmd
[30] << 8 | (u32
)cmd
[29] << 16 |
371 *lba
= (u64
)cmd
[9] | (u64
)cmd
[8] << 8 |
372 (u64
)cmd
[7] << 16 | (u64
)cmd
[6] << 24 |
373 (u64
)cmd
[5] << 32 | (u64
)cmd
[4] << 40 |
374 (u64
)cmd
[3] << 48 | (u64
)cmd
[2] << 56;
376 *num
= (u32
)cmd
[13] | (u32
)cmd
[12] << 8 | (u32
)cmd
[11] << 16 |
381 *lba
= (u32
)cmd
[5] | (u32
)cmd
[4] << 8 | (u32
)cmd
[3] << 16 |
384 *num
= (u32
)cmd
[9] | (u32
)cmd
[8] << 8 | (u32
)cmd
[7] << 16 |
391 *lba
= (u32
)cmd
[5] | (u32
)cmd
[4] << 8 | (u32
)cmd
[3] << 16 |
394 *num
= (u32
)cmd
[8] | (u32
)cmd
[7] << 8;
398 *lba
= (u32
)cmd
[3] | (u32
)cmd
[2] << 8 |
399 (u32
)(cmd
[1] & 0x1f) << 16;
400 *num
= (0 == cmd
[4]) ? 256 : cmd
[4];
407 static int scsi_debug_ioctl(struct scsi_device
*dev
, int cmd
, void __user
*arg
)
409 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
) {
410 printk(KERN_INFO
"scsi_debug: ioctl: cmd=0x%x\n", cmd
);
413 /* return -ENOTTY; // correct return but upsets fdisk */
416 static int check_readiness(struct scsi_cmnd
* SCpnt
, int reset_only
,
417 struct sdebug_dev_info
* devip
)
420 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
421 printk(KERN_INFO
"scsi_debug: Reporting Unit "
422 "attention: power on reset\n");
424 mk_sense_buffer(devip
, UNIT_ATTENTION
, POWERON_RESET
, 0);
425 return check_condition_result
;
427 if ((0 == reset_only
) && devip
->stopped
) {
428 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
429 printk(KERN_INFO
"scsi_debug: Reporting Not "
430 "ready: initializing command required\n");
431 mk_sense_buffer(devip
, NOT_READY
, LOGICAL_UNIT_NOT_READY
,
433 return check_condition_result
;
438 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
439 static int fill_from_dev_buffer(struct scsi_cmnd
*scp
, unsigned char *arr
,
443 struct scsi_data_buffer
*sdb
= scsi_in(scp
);
447 if (!(scsi_bidi_cmnd(scp
) || scp
->sc_data_direction
== DMA_FROM_DEVICE
))
448 return (DID_ERROR
<< 16);
450 act_len
= sg_copy_from_buffer(sdb
->table
.sgl
, sdb
->table
.nents
,
452 sdb
->resid
= scsi_bufflen(scp
) - act_len
;
457 /* Returns number of bytes fetched into 'arr' or -1 if error. */
458 static int fetch_to_dev_buffer(struct scsi_cmnd
*scp
, unsigned char *arr
,
461 if (!scsi_bufflen(scp
))
463 if (!(scsi_bidi_cmnd(scp
) || scp
->sc_data_direction
== DMA_TO_DEVICE
))
466 return scsi_sg_copy_to_buffer(scp
, arr
, arr_len
);
470 static const char * inq_vendor_id
= "Linux ";
471 static const char * inq_product_id
= "scsi_debug ";
472 static const char * inq_product_rev
= "0004";
474 static int inquiry_evpd_83(unsigned char * arr
, int port_group_id
,
475 int target_dev_id
, int dev_id_num
,
476 const char * dev_id_str
,
482 port_a
= target_dev_id
+ 1;
483 /* T10 vendor identifier field format (faked) */
484 arr
[0] = 0x2; /* ASCII */
487 memcpy(&arr
[4], inq_vendor_id
, 8);
488 memcpy(&arr
[12], inq_product_id
, 16);
489 memcpy(&arr
[28], dev_id_str
, dev_id_str_len
);
490 num
= 8 + 16 + dev_id_str_len
;
493 if (dev_id_num
>= 0) {
494 /* NAA-5, Logical unit identifier (binary) */
495 arr
[num
++] = 0x1; /* binary (not necessarily sas) */
496 arr
[num
++] = 0x3; /* PIV=0, lu, naa */
499 arr
[num
++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
503 arr
[num
++] = (dev_id_num
>> 24);
504 arr
[num
++] = (dev_id_num
>> 16) & 0xff;
505 arr
[num
++] = (dev_id_num
>> 8) & 0xff;
506 arr
[num
++] = dev_id_num
& 0xff;
507 /* Target relative port number */
508 arr
[num
++] = 0x61; /* proto=sas, binary */
509 arr
[num
++] = 0x94; /* PIV=1, target port, rel port */
510 arr
[num
++] = 0x0; /* reserved */
511 arr
[num
++] = 0x4; /* length */
512 arr
[num
++] = 0x0; /* reserved */
513 arr
[num
++] = 0x0; /* reserved */
515 arr
[num
++] = 0x1; /* relative port A */
517 /* NAA-5, Target port identifier */
518 arr
[num
++] = 0x61; /* proto=sas, binary */
519 arr
[num
++] = 0x93; /* piv=1, target port, naa */
522 arr
[num
++] = 0x52; /* naa-5, company id=0x222222 (fake) */
526 arr
[num
++] = (port_a
>> 24);
527 arr
[num
++] = (port_a
>> 16) & 0xff;
528 arr
[num
++] = (port_a
>> 8) & 0xff;
529 arr
[num
++] = port_a
& 0xff;
530 /* NAA-5, Target port group identifier */
531 arr
[num
++] = 0x61; /* proto=sas, binary */
532 arr
[num
++] = 0x95; /* piv=1, target port group id */
537 arr
[num
++] = (port_group_id
>> 8) & 0xff;
538 arr
[num
++] = port_group_id
& 0xff;
539 /* NAA-5, Target device identifier */
540 arr
[num
++] = 0x61; /* proto=sas, binary */
541 arr
[num
++] = 0xa3; /* piv=1, target device, naa */
544 arr
[num
++] = 0x52; /* naa-5, company id=0x222222 (fake) */
548 arr
[num
++] = (target_dev_id
>> 24);
549 arr
[num
++] = (target_dev_id
>> 16) & 0xff;
550 arr
[num
++] = (target_dev_id
>> 8) & 0xff;
551 arr
[num
++] = target_dev_id
& 0xff;
552 /* SCSI name string: Target device identifier */
553 arr
[num
++] = 0x63; /* proto=sas, UTF-8 */
554 arr
[num
++] = 0xa8; /* piv=1, target device, SCSI name string */
557 memcpy(arr
+ num
, "naa.52222220", 12);
559 snprintf(b
, sizeof(b
), "%08X", target_dev_id
);
560 memcpy(arr
+ num
, b
, 8);
562 memset(arr
+ num
, 0, 4);
/* Software interface identification VPD page (0x84) payload: three
 * 6-byte identifiers, laid out from byte 4 of the page onwards. */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Copy the canned 0x84 page payload into 'arr' and return its
 * length in bytes (18). */
static int inquiry_evpd_84(unsigned char *arr)
{
	const size_t payload_len = sizeof(vpd84_data);

	memcpy(arr, vpd84_data, payload_len);
	return (int)payload_len;
}
580 static int inquiry_evpd_85(unsigned char * arr
)
583 const char * na1
= "https://www.kernel.org/config";
584 const char * na2
= "http://www.kernel.org/log";
587 arr
[num
++] = 0x1; /* lu, storage config */
588 arr
[num
++] = 0x0; /* reserved */
593 plen
= ((plen
/ 4) + 1) * 4;
594 arr
[num
++] = plen
; /* length, null termianted, padded */
595 memcpy(arr
+ num
, na1
, olen
);
596 memset(arr
+ num
+ olen
, 0, plen
- olen
);
599 arr
[num
++] = 0x4; /* lu, logging */
600 arr
[num
++] = 0x0; /* reserved */
605 plen
= ((plen
/ 4) + 1) * 4;
606 arr
[num
++] = plen
; /* length, null terminated, padded */
607 memcpy(arr
+ num
, na2
, olen
);
608 memset(arr
+ num
+ olen
, 0, plen
- olen
);
614 /* SCSI ports VPD page */
615 static int inquiry_evpd_88(unsigned char * arr
, int target_dev_id
)
620 port_a
= target_dev_id
+ 1;
622 arr
[num
++] = 0x0; /* reserved */
623 arr
[num
++] = 0x0; /* reserved */
625 arr
[num
++] = 0x1; /* relative port 1 (primary) */
626 memset(arr
+ num
, 0, 6);
629 arr
[num
++] = 12; /* length tp descriptor */
630 /* naa-5 target port identifier (A) */
631 arr
[num
++] = 0x61; /* proto=sas, binary */
632 arr
[num
++] = 0x93; /* PIV=1, target port, NAA */
633 arr
[num
++] = 0x0; /* reserved */
634 arr
[num
++] = 0x8; /* length */
635 arr
[num
++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
639 arr
[num
++] = (port_a
>> 24);
640 arr
[num
++] = (port_a
>> 16) & 0xff;
641 arr
[num
++] = (port_a
>> 8) & 0xff;
642 arr
[num
++] = port_a
& 0xff;
644 arr
[num
++] = 0x0; /* reserved */
645 arr
[num
++] = 0x0; /* reserved */
647 arr
[num
++] = 0x2; /* relative port 2 (secondary) */
648 memset(arr
+ num
, 0, 6);
651 arr
[num
++] = 12; /* length tp descriptor */
652 /* naa-5 target port identifier (B) */
653 arr
[num
++] = 0x61; /* proto=sas, binary */
654 arr
[num
++] = 0x93; /* PIV=1, target port, NAA */
655 arr
[num
++] = 0x0; /* reserved */
656 arr
[num
++] = 0x8; /* length */
657 arr
[num
++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
661 arr
[num
++] = (port_b
>> 24);
662 arr
[num
++] = (port_b
>> 16) & 0xff;
663 arr
[num
++] = (port_b
>> 8) & 0xff;
664 arr
[num
++] = port_b
& 0xff;
670 static unsigned char vpd89_data
[] = {
671 /* from 4th byte */ 0,0,0,0,
672 'l','i','n','u','x',' ',' ',' ',
673 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
675 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
677 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
678 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
679 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
680 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
682 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
684 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
686 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
687 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
688 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
689 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
690 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
691 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
692 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
693 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
695 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
696 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
697 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
698 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
699 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
700 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
701 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
702 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
703 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
704 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
705 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
706 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
707 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
708 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
709 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
710 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
711 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
714 static int inquiry_evpd_89(unsigned char * arr
)
716 memcpy(arr
, vpd89_data
, sizeof(vpd89_data
));
717 return sizeof(vpd89_data
);
721 /* Block limits VPD page (SBC-3) */
722 static unsigned char vpdb0_data
[] = {
723 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
724 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
725 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
726 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
729 static int inquiry_evpd_b0(unsigned char * arr
)
733 memcpy(arr
, vpdb0_data
, sizeof(vpdb0_data
));
735 /* Optimal transfer length granularity */
736 gran
= 1 << scsi_debug_physblk_exp
;
737 arr
[2] = (gran
>> 8) & 0xff;
738 arr
[3] = gran
& 0xff;
740 /* Maximum Transfer Length */
741 if (sdebug_store_sectors
> 0x400) {
742 arr
[4] = (sdebug_store_sectors
>> 24) & 0xff;
743 arr
[5] = (sdebug_store_sectors
>> 16) & 0xff;
744 arr
[6] = (sdebug_store_sectors
>> 8) & 0xff;
745 arr
[7] = sdebug_store_sectors
& 0xff;
748 /* Optimal Transfer Length */
749 put_unaligned_be32(scsi_debug_opt_blks
, &arr
[8]);
751 if (scsi_debug_lbpu
) {
752 /* Maximum Unmap LBA Count */
753 put_unaligned_be32(scsi_debug_unmap_max_blocks
, &arr
[16]);
755 /* Maximum Unmap Block Descriptor Count */
756 put_unaligned_be32(scsi_debug_unmap_max_desc
, &arr
[20]);
759 /* Unmap Granularity Alignment */
760 if (scsi_debug_unmap_alignment
) {
761 put_unaligned_be32(scsi_debug_unmap_alignment
, &arr
[28]);
762 arr
[28] |= 0x80; /* UGAVALID */
765 /* Optimal Unmap Granularity */
766 put_unaligned_be32(scsi_debug_unmap_granularity
, &arr
[24]);
768 /* Maximum WRITE SAME Length */
769 put_unaligned_be64(scsi_debug_write_same_length
, &arr
[32]);
771 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
773 return sizeof(vpdb0_data
);
776 /* Block device characteristics VPD page (SBC-3) */
777 static int inquiry_evpd_b1(unsigned char *arr
)
779 memset(arr
, 0, 0x3c);
781 arr
[1] = 1; /* non rotating medium (e.g. solid state) */
783 arr
[3] = 5; /* less than 1.8" */
788 /* Logical block provisioning VPD page (SBC-3) */
789 static int inquiry_evpd_b2(unsigned char *arr
)
792 arr
[0] = 0; /* threshold exponent */
797 if (scsi_debug_lbpws
)
800 if (scsi_debug_lbpws10
)
803 if (scsi_debug_lbprz
)
809 #define SDEBUG_LONG_INQ_SZ 96
810 #define SDEBUG_MAX_INQ_ARR_SZ 584
812 static int resp_inquiry(struct scsi_cmnd
* scp
, int target
,
813 struct sdebug_dev_info
* devip
)
815 unsigned char pq_pdt
;
817 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
818 int alloc_len
, n
, ret
;
820 alloc_len
= (cmd
[3] << 8) + cmd
[4];
821 arr
= kzalloc(SDEBUG_MAX_INQ_ARR_SZ
, GFP_ATOMIC
);
823 return DID_REQUEUE
<< 16;
825 pq_pdt
= 0x1e; /* present, wlun */
826 else if (scsi_debug_no_lun_0
&& (0 == devip
->lun
))
827 pq_pdt
= 0x7f; /* not present, no device type */
829 pq_pdt
= (scsi_debug_ptype
& 0x1f);
831 if (0x2 & cmd
[1]) { /* CMDDT bit set */
832 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
835 return check_condition_result
;
836 } else if (0x1 & cmd
[1]) { /* EVPD bit set */
837 int lu_id_num
, port_group_id
, target_dev_id
, len
;
839 int host_no
= devip
->sdbg_host
->shost
->host_no
;
841 port_group_id
= (((host_no
+ 1) & 0x7f) << 8) +
842 (devip
->channel
& 0x7f);
843 if (0 == scsi_debug_vpd_use_hostno
)
845 lu_id_num
= devip
->wlun
? -1 : (((host_no
+ 1) * 2000) +
846 (devip
->target
* 1000) + devip
->lun
);
847 target_dev_id
= ((host_no
+ 1) * 2000) +
848 (devip
->target
* 1000) - 3;
849 len
= scnprintf(lu_id_str
, 6, "%d", lu_id_num
);
850 if (0 == cmd
[2]) { /* supported vital product data pages */
851 arr
[1] = cmd
[2]; /*sanity */
853 arr
[n
++] = 0x0; /* this page */
854 arr
[n
++] = 0x80; /* unit serial number */
855 arr
[n
++] = 0x83; /* device identification */
856 arr
[n
++] = 0x84; /* software interface ident. */
857 arr
[n
++] = 0x85; /* management network addresses */
858 arr
[n
++] = 0x86; /* extended inquiry */
859 arr
[n
++] = 0x87; /* mode page policy */
860 arr
[n
++] = 0x88; /* SCSI ports */
861 arr
[n
++] = 0x89; /* ATA information */
862 arr
[n
++] = 0xb0; /* Block limits (SBC) */
863 arr
[n
++] = 0xb1; /* Block characteristics (SBC) */
864 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
866 arr
[3] = n
- 4; /* number of supported VPD pages */
867 } else if (0x80 == cmd
[2]) { /* unit serial number */
868 arr
[1] = cmd
[2]; /*sanity */
870 memcpy(&arr
[4], lu_id_str
, len
);
871 } else if (0x83 == cmd
[2]) { /* device identification */
872 arr
[1] = cmd
[2]; /*sanity */
873 arr
[3] = inquiry_evpd_83(&arr
[4], port_group_id
,
874 target_dev_id
, lu_id_num
,
876 } else if (0x84 == cmd
[2]) { /* Software interface ident. */
877 arr
[1] = cmd
[2]; /*sanity */
878 arr
[3] = inquiry_evpd_84(&arr
[4]);
879 } else if (0x85 == cmd
[2]) { /* Management network addresses */
880 arr
[1] = cmd
[2]; /*sanity */
881 arr
[3] = inquiry_evpd_85(&arr
[4]);
882 } else if (0x86 == cmd
[2]) { /* extended inquiry */
883 arr
[1] = cmd
[2]; /*sanity */
884 arr
[3] = 0x3c; /* number of following entries */
885 if (scsi_debug_dif
== SD_DIF_TYPE3_PROTECTION
)
886 arr
[4] = 0x4; /* SPT: GRD_CHK:1 */
887 else if (scsi_debug_dif
)
888 arr
[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
890 arr
[4] = 0x0; /* no protection stuff */
891 arr
[5] = 0x7; /* head of q, ordered + simple q's */
892 } else if (0x87 == cmd
[2]) { /* mode page policy */
893 arr
[1] = cmd
[2]; /*sanity */
894 arr
[3] = 0x8; /* number of following entries */
895 arr
[4] = 0x2; /* disconnect-reconnect mp */
896 arr
[6] = 0x80; /* mlus, shared */
897 arr
[8] = 0x18; /* protocol specific lu */
898 arr
[10] = 0x82; /* mlus, per initiator port */
899 } else if (0x88 == cmd
[2]) { /* SCSI Ports */
900 arr
[1] = cmd
[2]; /*sanity */
901 arr
[3] = inquiry_evpd_88(&arr
[4], target_dev_id
);
902 } else if (0x89 == cmd
[2]) { /* ATA information */
903 arr
[1] = cmd
[2]; /*sanity */
904 n
= inquiry_evpd_89(&arr
[4]);
907 } else if (0xb0 == cmd
[2]) { /* Block limits (SBC) */
908 arr
[1] = cmd
[2]; /*sanity */
909 arr
[3] = inquiry_evpd_b0(&arr
[4]);
910 } else if (0xb1 == cmd
[2]) { /* Block characteristics (SBC) */
911 arr
[1] = cmd
[2]; /*sanity */
912 arr
[3] = inquiry_evpd_b1(&arr
[4]);
913 } else if (0xb2 == cmd
[2]) { /* Logical Block Prov. (SBC) */
914 arr
[1] = cmd
[2]; /*sanity */
915 arr
[3] = inquiry_evpd_b2(&arr
[4]);
917 /* Illegal request, invalid field in cdb */
918 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
919 INVALID_FIELD_IN_CDB
, 0);
921 return check_condition_result
;
923 len
= min(((arr
[2] << 8) + arr
[3]) + 4, alloc_len
);
924 ret
= fill_from_dev_buffer(scp
, arr
,
925 min(len
, SDEBUG_MAX_INQ_ARR_SZ
));
929 /* drops through here for a standard inquiry */
930 arr
[1] = scsi_debug_removable
? 0x80 : 0; /* Removable disk */
931 arr
[2] = scsi_debug_scsi_level
;
932 arr
[3] = 2; /* response_data_format==2 */
933 arr
[4] = SDEBUG_LONG_INQ_SZ
- 5;
934 arr
[5] = scsi_debug_dif
? 1 : 0; /* PROTECT bit */
935 if (0 == scsi_debug_vpd_use_hostno
)
936 arr
[5] = 0x10; /* claim: implicit TGPS */
937 arr
[6] = 0x10; /* claim: MultiP */
938 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
939 arr
[7] = 0xa; /* claim: LINKED + CMDQUE */
940 memcpy(&arr
[8], inq_vendor_id
, 8);
941 memcpy(&arr
[16], inq_product_id
, 16);
942 memcpy(&arr
[32], inq_product_rev
, 4);
943 /* version descriptors (2 bytes each) follow */
944 arr
[58] = 0x0; arr
[59] = 0x77; /* SAM-3 ANSI */
945 arr
[60] = 0x3; arr
[61] = 0x14; /* SPC-3 ANSI */
947 if (scsi_debug_ptype
== 0) {
948 arr
[n
++] = 0x3; arr
[n
++] = 0x3d; /* SBC-2 ANSI */
949 } else if (scsi_debug_ptype
== 1) {
950 arr
[n
++] = 0x3; arr
[n
++] = 0x60; /* SSC-2 no version */
952 arr
[n
++] = 0xc; arr
[n
++] = 0xf; /* SAS-1.1 rev 10 */
953 ret
= fill_from_dev_buffer(scp
, arr
,
954 min(alloc_len
, SDEBUG_LONG_INQ_SZ
));
959 static int resp_requests(struct scsi_cmnd
* scp
,
960 struct sdebug_dev_info
* devip
)
962 unsigned char * sbuff
;
963 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
964 unsigned char arr
[SDEBUG_SENSE_LEN
];
968 memset(arr
, 0, sizeof(arr
));
969 if (devip
->reset
== 1)
970 mk_sense_buffer(devip
, 0, NO_ADDITIONAL_SENSE
, 0);
971 want_dsense
= !!(cmd
[1] & 1) || scsi_debug_dsense
;
972 sbuff
= devip
->sense_buff
;
973 if ((iec_m_pg
[2] & 0x4) && (6 == (iec_m_pg
[3] & 0xf))) {
976 arr
[1] = 0x0; /* NO_SENSE in sense_key */
977 arr
[2] = THRESHOLD_EXCEEDED
;
978 arr
[3] = 0xff; /* TEST set and MRIE==6 */
981 arr
[2] = 0x0; /* NO_SENSE in sense_key */
982 arr
[7] = 0xa; /* 18 byte sense buffer */
983 arr
[12] = THRESHOLD_EXCEEDED
;
984 arr
[13] = 0xff; /* TEST set and MRIE==6 */
987 memcpy(arr
, sbuff
, SDEBUG_SENSE_LEN
);
988 if ((cmd
[1] & 1) && (! scsi_debug_dsense
)) {
989 /* DESC bit set and sense_buff in fixed format */
990 memset(arr
, 0, sizeof(arr
));
992 arr
[1] = sbuff
[2]; /* sense key */
993 arr
[2] = sbuff
[12]; /* asc */
994 arr
[3] = sbuff
[13]; /* ascq */
998 mk_sense_buffer(devip
, 0, NO_ADDITIONAL_SENSE
, 0);
999 return fill_from_dev_buffer(scp
, arr
, len
);
1002 static int resp_start_stop(struct scsi_cmnd
* scp
,
1003 struct sdebug_dev_info
* devip
)
1005 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1006 int power_cond
, errsts
, start
;
1008 if ((errsts
= check_readiness(scp
, 1, devip
)))
1010 power_cond
= (cmd
[4] & 0xf0) >> 4;
1012 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
1014 return check_condition_result
;
1017 if (start
== devip
->stopped
)
1018 devip
->stopped
= !start
;
1022 static sector_t
get_sdebug_capacity(void)
1024 if (scsi_debug_virtual_gb
> 0)
1025 return (sector_t
)scsi_debug_virtual_gb
*
1026 (1073741824 / scsi_debug_sector_size
);
1028 return sdebug_store_sectors
;
1031 #define SDEBUG_READCAP_ARR_SZ 8
1032 static int resp_readcap(struct scsi_cmnd
* scp
,
1033 struct sdebug_dev_info
* devip
)
1035 unsigned char arr
[SDEBUG_READCAP_ARR_SZ
];
1039 if ((errsts
= check_readiness(scp
, 1, devip
)))
1041 /* following just in case virtual_gb changed */
1042 sdebug_capacity
= get_sdebug_capacity();
1043 memset(arr
, 0, SDEBUG_READCAP_ARR_SZ
);
1044 if (sdebug_capacity
< 0xffffffff) {
1045 capac
= (unsigned int)sdebug_capacity
- 1;
1046 arr
[0] = (capac
>> 24);
1047 arr
[1] = (capac
>> 16) & 0xff;
1048 arr
[2] = (capac
>> 8) & 0xff;
1049 arr
[3] = capac
& 0xff;
1056 arr
[6] = (scsi_debug_sector_size
>> 8) & 0xff;
1057 arr
[7] = scsi_debug_sector_size
& 0xff;
1058 return fill_from_dev_buffer(scp
, arr
, SDEBUG_READCAP_ARR_SZ
);
1061 #define SDEBUG_READCAP16_ARR_SZ 32
1062 static int resp_readcap16(struct scsi_cmnd
* scp
,
1063 struct sdebug_dev_info
* devip
)
1065 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1066 unsigned char arr
[SDEBUG_READCAP16_ARR_SZ
];
1067 unsigned long long capac
;
1068 int errsts
, k
, alloc_len
;
1070 if ((errsts
= check_readiness(scp
, 1, devip
)))
1072 alloc_len
= ((cmd
[10] << 24) + (cmd
[11] << 16) + (cmd
[12] << 8)
1074 /* following just in case virtual_gb changed */
1075 sdebug_capacity
= get_sdebug_capacity();
1076 memset(arr
, 0, SDEBUG_READCAP16_ARR_SZ
);
1077 capac
= sdebug_capacity
- 1;
1078 for (k
= 0; k
< 8; ++k
, capac
>>= 8)
1079 arr
[7 - k
] = capac
& 0xff;
1080 arr
[8] = (scsi_debug_sector_size
>> 24) & 0xff;
1081 arr
[9] = (scsi_debug_sector_size
>> 16) & 0xff;
1082 arr
[10] = (scsi_debug_sector_size
>> 8) & 0xff;
1083 arr
[11] = scsi_debug_sector_size
& 0xff;
1084 arr
[13] = scsi_debug_physblk_exp
& 0xf;
1085 arr
[14] = (scsi_debug_lowest_aligned
>> 8) & 0x3f;
1087 if (scsi_debug_lbp()) {
1088 arr
[14] |= 0x80; /* LBPME */
1089 if (scsi_debug_lbprz
)
1090 arr
[14] |= 0x40; /* LBPRZ */
1093 arr
[15] = scsi_debug_lowest_aligned
& 0xff;
1095 if (scsi_debug_dif
) {
1096 arr
[12] = (scsi_debug_dif
- 1) << 1; /* P_TYPE */
1097 arr
[12] |= 1; /* PROT_EN */
1100 return fill_from_dev_buffer(scp
, arr
,
1101 min(alloc_len
, SDEBUG_READCAP16_ARR_SZ
));
1104 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1106 static int resp_report_tgtpgs(struct scsi_cmnd
* scp
,
1107 struct sdebug_dev_info
* devip
)
1109 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1110 unsigned char * arr
;
1111 int host_no
= devip
->sdbg_host
->shost
->host_no
;
1112 int n
, ret
, alen
, rlen
;
1113 int port_group_a
, port_group_b
, port_a
, port_b
;
1115 alen
= ((cmd
[6] << 24) + (cmd
[7] << 16) + (cmd
[8] << 8)
1118 arr
= kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ
, GFP_ATOMIC
);
1120 return DID_REQUEUE
<< 16;
1122 * EVPD page 0x88 states we have two ports, one
1123 * real and a fake port with no device connected.
1124 * So we create two port groups with one port each
1125 * and set the group with port B to unavailable.
1127 port_a
= 0x1; /* relative port A */
1128 port_b
= 0x2; /* relative port B */
1129 port_group_a
= (((host_no
+ 1) & 0x7f) << 8) +
1130 (devip
->channel
& 0x7f);
1131 port_group_b
= (((host_no
+ 1) & 0x7f) << 8) +
1132 (devip
->channel
& 0x7f) + 0x80;
1135 * The asymmetric access state is cycled according to the host_id.
1138 if (0 == scsi_debug_vpd_use_hostno
) {
1139 arr
[n
++] = host_no
% 3; /* Asymm access state */
1140 arr
[n
++] = 0x0F; /* claim: all states are supported */
1142 arr
[n
++] = 0x0; /* Active/Optimized path */
1143 arr
[n
++] = 0x01; /* claim: only support active/optimized paths */
1145 arr
[n
++] = (port_group_a
>> 8) & 0xff;
1146 arr
[n
++] = port_group_a
& 0xff;
1147 arr
[n
++] = 0; /* Reserved */
1148 arr
[n
++] = 0; /* Status code */
1149 arr
[n
++] = 0; /* Vendor unique */
1150 arr
[n
++] = 0x1; /* One port per group */
1151 arr
[n
++] = 0; /* Reserved */
1152 arr
[n
++] = 0; /* Reserved */
1153 arr
[n
++] = (port_a
>> 8) & 0xff;
1154 arr
[n
++] = port_a
& 0xff;
1155 arr
[n
++] = 3; /* Port unavailable */
1156 arr
[n
++] = 0x08; /* claim: only unavailalbe paths are supported */
1157 arr
[n
++] = (port_group_b
>> 8) & 0xff;
1158 arr
[n
++] = port_group_b
& 0xff;
1159 arr
[n
++] = 0; /* Reserved */
1160 arr
[n
++] = 0; /* Status code */
1161 arr
[n
++] = 0; /* Vendor unique */
1162 arr
[n
++] = 0x1; /* One port per group */
1163 arr
[n
++] = 0; /* Reserved */
1164 arr
[n
++] = 0; /* Reserved */
1165 arr
[n
++] = (port_b
>> 8) & 0xff;
1166 arr
[n
++] = port_b
& 0xff;
1169 arr
[0] = (rlen
>> 24) & 0xff;
1170 arr
[1] = (rlen
>> 16) & 0xff;
1171 arr
[2] = (rlen
>> 8) & 0xff;
1172 arr
[3] = rlen
& 0xff;
1175 * Return the smallest value of either
1176 * - The allocated length
1177 * - The constructed command length
1178 * - The maximum array size
1181 ret
= fill_from_dev_buffer(scp
, arr
,
1182 min(rlen
, SDEBUG_MAX_TGTPGS_ARR_SZ
));
1187 /* <<Following mode page info copied from ST318451LW>> */
1189 static int resp_err_recov_pg(unsigned char * p
, int pcontrol
, int target
)
1190 { /* Read-Write Error Recovery page for mode_sense */
1191 unsigned char err_recov_pg
[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1194 memcpy(p
, err_recov_pg
, sizeof(err_recov_pg
));
1196 memset(p
+ 2, 0, sizeof(err_recov_pg
) - 2);
1197 return sizeof(err_recov_pg
);
1200 static int resp_disconnect_pg(unsigned char * p
, int pcontrol
, int target
)
1201 { /* Disconnect-Reconnect page for mode_sense */
1202 unsigned char disconnect_pg
[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1203 0, 0, 0, 0, 0, 0, 0, 0};
1205 memcpy(p
, disconnect_pg
, sizeof(disconnect_pg
));
1207 memset(p
+ 2, 0, sizeof(disconnect_pg
) - 2);
1208 return sizeof(disconnect_pg
);
1211 static int resp_format_pg(unsigned char * p
, int pcontrol
, int target
)
1212 { /* Format device page for mode_sense */
1213 unsigned char format_pg
[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1214 0, 0, 0, 0, 0, 0, 0, 0,
1215 0, 0, 0, 0, 0x40, 0, 0, 0};
1217 memcpy(p
, format_pg
, sizeof(format_pg
));
1218 p
[10] = (sdebug_sectors_per
>> 8) & 0xff;
1219 p
[11] = sdebug_sectors_per
& 0xff;
1220 p
[12] = (scsi_debug_sector_size
>> 8) & 0xff;
1221 p
[13] = scsi_debug_sector_size
& 0xff;
1222 if (scsi_debug_removable
)
1223 p
[20] |= 0x20; /* should agree with INQUIRY */
1225 memset(p
+ 2, 0, sizeof(format_pg
) - 2);
1226 return sizeof(format_pg
);
1229 static int resp_caching_pg(unsigned char * p
, int pcontrol
, int target
)
1230 { /* Caching page for mode_sense */
1231 unsigned char caching_pg
[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1232 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1234 memcpy(p
, caching_pg
, sizeof(caching_pg
));
1236 memset(p
+ 2, 0, sizeof(caching_pg
) - 2);
1237 return sizeof(caching_pg
);
1240 static int resp_ctrl_m_pg(unsigned char * p
, int pcontrol
, int target
)
1241 { /* Control mode page for mode_sense */
1242 unsigned char ch_ctrl_m_pg
[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1244 unsigned char d_ctrl_m_pg
[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1247 if (scsi_debug_dsense
)
1248 ctrl_m_pg
[2] |= 0x4;
1250 ctrl_m_pg
[2] &= ~0x4;
1253 ctrl_m_pg
[5] |= 0x80; /* ATO=1 */
1255 memcpy(p
, ctrl_m_pg
, sizeof(ctrl_m_pg
));
1257 memcpy(p
+ 2, ch_ctrl_m_pg
, sizeof(ch_ctrl_m_pg
));
1258 else if (2 == pcontrol
)
1259 memcpy(p
, d_ctrl_m_pg
, sizeof(d_ctrl_m_pg
));
1260 return sizeof(ctrl_m_pg
);
1264 static int resp_iec_m_pg(unsigned char * p
, int pcontrol
, int target
)
1265 { /* Informational Exceptions control mode page for mode_sense */
1266 unsigned char ch_iec_m_pg
[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1268 unsigned char d_iec_m_pg
[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1271 memcpy(p
, iec_m_pg
, sizeof(iec_m_pg
));
1273 memcpy(p
+ 2, ch_iec_m_pg
, sizeof(ch_iec_m_pg
));
1274 else if (2 == pcontrol
)
1275 memcpy(p
, d_iec_m_pg
, sizeof(d_iec_m_pg
));
1276 return sizeof(iec_m_pg
);
1279 static int resp_sas_sf_m_pg(unsigned char * p
, int pcontrol
, int target
)
1280 { /* SAS SSP mode page - short format for mode_sense */
1281 unsigned char sas_sf_m_pg
[] = {0x19, 0x6,
1282 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1284 memcpy(p
, sas_sf_m_pg
, sizeof(sas_sf_m_pg
));
1286 memset(p
+ 2, 0, sizeof(sas_sf_m_pg
) - 2);
1287 return sizeof(sas_sf_m_pg
);
1291 static int resp_sas_pcd_m_spg(unsigned char * p
, int pcontrol
, int target
,
1293 { /* SAS phy control and discover mode page for mode_sense */
1294 unsigned char sas_pcd_m_pg
[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1295 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1296 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1297 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1298 0x2, 0, 0, 0, 0, 0, 0, 0,
1299 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1300 0, 0, 0, 0, 0, 0, 0, 0,
1301 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1302 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1303 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1304 0x3, 0, 0, 0, 0, 0, 0, 0,
1305 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1306 0, 0, 0, 0, 0, 0, 0, 0,
1310 port_a
= target_dev_id
+ 1;
1311 port_b
= port_a
+ 1;
1312 memcpy(p
, sas_pcd_m_pg
, sizeof(sas_pcd_m_pg
));
1313 p
[20] = (port_a
>> 24);
1314 p
[21] = (port_a
>> 16) & 0xff;
1315 p
[22] = (port_a
>> 8) & 0xff;
1316 p
[23] = port_a
& 0xff;
1317 p
[48 + 20] = (port_b
>> 24);
1318 p
[48 + 21] = (port_b
>> 16) & 0xff;
1319 p
[48 + 22] = (port_b
>> 8) & 0xff;
1320 p
[48 + 23] = port_b
& 0xff;
1322 memset(p
+ 4, 0, sizeof(sas_pcd_m_pg
) - 4);
1323 return sizeof(sas_pcd_m_pg
);
1326 static int resp_sas_sha_m_spg(unsigned char * p
, int pcontrol
)
1327 { /* SAS SSP shared protocol specific port mode subpage */
1328 unsigned char sas_sha_m_pg
[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1329 0, 0, 0, 0, 0, 0, 0, 0,
1332 memcpy(p
, sas_sha_m_pg
, sizeof(sas_sha_m_pg
));
1334 memset(p
+ 4, 0, sizeof(sas_sha_m_pg
) - 4);
1335 return sizeof(sas_sha_m_pg
);
1338 #define SDEBUG_MAX_MSENSE_SZ 256
1340 static int resp_mode_sense(struct scsi_cmnd
* scp
, int target
,
1341 struct sdebug_dev_info
* devip
)
1343 unsigned char dbd
, llbaa
;
1344 int pcontrol
, pcode
, subpcode
, bd_len
;
1345 unsigned char dev_spec
;
1346 int k
, alloc_len
, msense_6
, offset
, len
, errsts
, target_dev_id
;
1348 unsigned char arr
[SDEBUG_MAX_MSENSE_SZ
];
1349 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1351 if ((errsts
= check_readiness(scp
, 1, devip
)))
1353 dbd
= !!(cmd
[1] & 0x8);
1354 pcontrol
= (cmd
[2] & 0xc0) >> 6;
1355 pcode
= cmd
[2] & 0x3f;
1357 msense_6
= (MODE_SENSE
== cmd
[0]);
1358 llbaa
= msense_6
? 0 : !!(cmd
[1] & 0x10);
1359 if ((0 == scsi_debug_ptype
) && (0 == dbd
))
1360 bd_len
= llbaa
? 16 : 8;
1363 alloc_len
= msense_6
? cmd
[4] : ((cmd
[7] << 8) | cmd
[8]);
1364 memset(arr
, 0, SDEBUG_MAX_MSENSE_SZ
);
1365 if (0x3 == pcontrol
) { /* Saving values not supported */
1366 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, SAVING_PARAMS_UNSUP
,
1368 return check_condition_result
;
1370 target_dev_id
= ((devip
->sdbg_host
->shost
->host_no
+ 1) * 2000) +
1371 (devip
->target
* 1000) - 3;
1372 /* set DPOFUA bit for disks */
1373 if (0 == scsi_debug_ptype
)
1374 dev_spec
= (DEV_READONLY(target
) ? 0x80 : 0x0) | 0x10;
1384 arr
[4] = 0x1; /* set LONGLBA bit */
1385 arr
[7] = bd_len
; /* assume 255 or less */
1389 if ((bd_len
> 0) && (!sdebug_capacity
))
1390 sdebug_capacity
= get_sdebug_capacity();
1393 if (sdebug_capacity
> 0xfffffffe) {
1399 ap
[0] = (sdebug_capacity
>> 24) & 0xff;
1400 ap
[1] = (sdebug_capacity
>> 16) & 0xff;
1401 ap
[2] = (sdebug_capacity
>> 8) & 0xff;
1402 ap
[3] = sdebug_capacity
& 0xff;
1404 ap
[6] = (scsi_debug_sector_size
>> 8) & 0xff;
1405 ap
[7] = scsi_debug_sector_size
& 0xff;
1408 } else if (16 == bd_len
) {
1409 unsigned long long capac
= sdebug_capacity
;
1411 for (k
= 0; k
< 8; ++k
, capac
>>= 8)
1412 ap
[7 - k
] = capac
& 0xff;
1413 ap
[12] = (scsi_debug_sector_size
>> 24) & 0xff;
1414 ap
[13] = (scsi_debug_sector_size
>> 16) & 0xff;
1415 ap
[14] = (scsi_debug_sector_size
>> 8) & 0xff;
1416 ap
[15] = scsi_debug_sector_size
& 0xff;
1421 if ((subpcode
> 0x0) && (subpcode
< 0xff) && (0x19 != pcode
)) {
1422 /* TODO: Control Extension page */
1423 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
1425 return check_condition_result
;
1428 case 0x1: /* Read-Write error recovery page, direct access */
1429 len
= resp_err_recov_pg(ap
, pcontrol
, target
);
1432 case 0x2: /* Disconnect-Reconnect page, all devices */
1433 len
= resp_disconnect_pg(ap
, pcontrol
, target
);
1436 case 0x3: /* Format device page, direct access */
1437 len
= resp_format_pg(ap
, pcontrol
, target
);
1440 case 0x8: /* Caching page, direct access */
1441 len
= resp_caching_pg(ap
, pcontrol
, target
);
1444 case 0xa: /* Control Mode page, all devices */
1445 len
= resp_ctrl_m_pg(ap
, pcontrol
, target
);
1448 case 0x19: /* if spc==1 then sas phy, control+discover */
1449 if ((subpcode
> 0x2) && (subpcode
< 0xff)) {
1450 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1451 INVALID_FIELD_IN_CDB
, 0);
1452 return check_condition_result
;
1455 if ((0x0 == subpcode
) || (0xff == subpcode
))
1456 len
+= resp_sas_sf_m_pg(ap
+ len
, pcontrol
, target
);
1457 if ((0x1 == subpcode
) || (0xff == subpcode
))
1458 len
+= resp_sas_pcd_m_spg(ap
+ len
, pcontrol
, target
,
1460 if ((0x2 == subpcode
) || (0xff == subpcode
))
1461 len
+= resp_sas_sha_m_spg(ap
+ len
, pcontrol
);
1464 case 0x1c: /* Informational Exceptions Mode page, all devices */
1465 len
= resp_iec_m_pg(ap
, pcontrol
, target
);
1468 case 0x3f: /* Read all Mode pages */
1469 if ((0 == subpcode
) || (0xff == subpcode
)) {
1470 len
= resp_err_recov_pg(ap
, pcontrol
, target
);
1471 len
+= resp_disconnect_pg(ap
+ len
, pcontrol
, target
);
1472 len
+= resp_format_pg(ap
+ len
, pcontrol
, target
);
1473 len
+= resp_caching_pg(ap
+ len
, pcontrol
, target
);
1474 len
+= resp_ctrl_m_pg(ap
+ len
, pcontrol
, target
);
1475 len
+= resp_sas_sf_m_pg(ap
+ len
, pcontrol
, target
);
1476 if (0xff == subpcode
) {
1477 len
+= resp_sas_pcd_m_spg(ap
+ len
, pcontrol
,
1478 target
, target_dev_id
);
1479 len
+= resp_sas_sha_m_spg(ap
+ len
, pcontrol
);
1481 len
+= resp_iec_m_pg(ap
+ len
, pcontrol
, target
);
1483 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1484 INVALID_FIELD_IN_CDB
, 0);
1485 return check_condition_result
;
1490 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
1492 return check_condition_result
;
1495 arr
[0] = offset
- 1;
1497 arr
[0] = ((offset
- 2) >> 8) & 0xff;
1498 arr
[1] = (offset
- 2) & 0xff;
1500 return fill_from_dev_buffer(scp
, arr
, min(alloc_len
, offset
));
1503 #define SDEBUG_MAX_MSELECT_SZ 512
1505 static int resp_mode_select(struct scsi_cmnd
* scp
, int mselect6
,
1506 struct sdebug_dev_info
* devip
)
1508 int pf
, sp
, ps
, md_len
, bd_len
, off
, spf
, pg_len
;
1509 int param_len
, res
, errsts
, mpage
;
1510 unsigned char arr
[SDEBUG_MAX_MSELECT_SZ
];
1511 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1513 if ((errsts
= check_readiness(scp
, 1, devip
)))
1515 memset(arr
, 0, sizeof(arr
));
1518 param_len
= mselect6
? cmd
[4] : ((cmd
[7] << 8) + cmd
[8]);
1519 if ((0 == pf
) || sp
|| (param_len
> SDEBUG_MAX_MSELECT_SZ
)) {
1520 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1521 INVALID_FIELD_IN_CDB
, 0);
1522 return check_condition_result
;
1524 res
= fetch_to_dev_buffer(scp
, arr
, param_len
);
1526 return (DID_ERROR
<< 16);
1527 else if ((res
< param_len
) &&
1528 (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
))
1529 printk(KERN_INFO
"scsi_debug: mode_select: cdb indicated=%d, "
1530 " IO sent=%d bytes\n", param_len
, res
);
1531 md_len
= mselect6
? (arr
[0] + 1) : ((arr
[0] << 8) + arr
[1] + 2);
1532 bd_len
= mselect6
? arr
[3] : ((arr
[6] << 8) + arr
[7]);
1534 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1535 INVALID_FIELD_IN_PARAM_LIST
, 0);
1536 return check_condition_result
;
1538 off
= bd_len
+ (mselect6
? 4 : 8);
1539 mpage
= arr
[off
] & 0x3f;
1540 ps
= !!(arr
[off
] & 0x80);
1542 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1543 INVALID_FIELD_IN_PARAM_LIST
, 0);
1544 return check_condition_result
;
1546 spf
= !!(arr
[off
] & 0x40);
1547 pg_len
= spf
? ((arr
[off
+ 2] << 8) + arr
[off
+ 3] + 4) :
1549 if ((pg_len
+ off
) > param_len
) {
1550 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1551 PARAMETER_LIST_LENGTH_ERR
, 0);
1552 return check_condition_result
;
1555 case 0xa: /* Control Mode page */
1556 if (ctrl_m_pg
[1] == arr
[off
+ 1]) {
1557 memcpy(ctrl_m_pg
+ 2, arr
+ off
+ 2,
1558 sizeof(ctrl_m_pg
) - 2);
1559 scsi_debug_dsense
= !!(ctrl_m_pg
[2] & 0x4);
1563 case 0x1c: /* Informational Exceptions Mode page */
1564 if (iec_m_pg
[1] == arr
[off
+ 1]) {
1565 memcpy(iec_m_pg
+ 2, arr
+ off
+ 2,
1566 sizeof(iec_m_pg
) - 2);
1573 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1574 INVALID_FIELD_IN_PARAM_LIST
, 0);
1575 return check_condition_result
;
1578 static int resp_temp_l_pg(unsigned char * arr
)
1580 unsigned char temp_l_pg
[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1581 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1584 memcpy(arr
, temp_l_pg
, sizeof(temp_l_pg
));
1585 return sizeof(temp_l_pg
);
1588 static int resp_ie_l_pg(unsigned char * arr
)
1590 unsigned char ie_l_pg
[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1593 memcpy(arr
, ie_l_pg
, sizeof(ie_l_pg
));
1594 if (iec_m_pg
[2] & 0x4) { /* TEST bit set */
1595 arr
[4] = THRESHOLD_EXCEEDED
;
1598 return sizeof(ie_l_pg
);
1601 #define SDEBUG_MAX_LSENSE_SZ 512
1603 static int resp_log_sense(struct scsi_cmnd
* scp
,
1604 struct sdebug_dev_info
* devip
)
1606 int ppc
, sp
, pcontrol
, pcode
, subpcode
, alloc_len
, errsts
, len
, n
;
1607 unsigned char arr
[SDEBUG_MAX_LSENSE_SZ
];
1608 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1610 if ((errsts
= check_readiness(scp
, 1, devip
)))
1612 memset(arr
, 0, sizeof(arr
));
1616 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1617 INVALID_FIELD_IN_CDB
, 0);
1618 return check_condition_result
;
1620 pcontrol
= (cmd
[2] & 0xc0) >> 6;
1621 pcode
= cmd
[2] & 0x3f;
1622 subpcode
= cmd
[3] & 0xff;
1623 alloc_len
= (cmd
[7] << 8) + cmd
[8];
1625 if (0 == subpcode
) {
1627 case 0x0: /* Supported log pages log page */
1629 arr
[n
++] = 0x0; /* this page */
1630 arr
[n
++] = 0xd; /* Temperature */
1631 arr
[n
++] = 0x2f; /* Informational exceptions */
1634 case 0xd: /* Temperature log page */
1635 arr
[3] = resp_temp_l_pg(arr
+ 4);
1637 case 0x2f: /* Informational exceptions log page */
1638 arr
[3] = resp_ie_l_pg(arr
+ 4);
1641 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1642 INVALID_FIELD_IN_CDB
, 0);
1643 return check_condition_result
;
1645 } else if (0xff == subpcode
) {
1649 case 0x0: /* Supported log pages and subpages log page */
1652 arr
[n
++] = 0x0; /* 0,0 page */
1654 arr
[n
++] = 0xff; /* this page */
1656 arr
[n
++] = 0x0; /* Temperature */
1658 arr
[n
++] = 0x0; /* Informational exceptions */
1661 case 0xd: /* Temperature subpages */
1664 arr
[n
++] = 0x0; /* Temperature */
1667 case 0x2f: /* Informational exceptions subpages */
1670 arr
[n
++] = 0x0; /* Informational exceptions */
1674 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1675 INVALID_FIELD_IN_CDB
, 0);
1676 return check_condition_result
;
1679 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1680 INVALID_FIELD_IN_CDB
, 0);
1681 return check_condition_result
;
1683 len
= min(((arr
[2] << 8) + arr
[3]) + 4, alloc_len
);
1684 return fill_from_dev_buffer(scp
, arr
,
1685 min(len
, SDEBUG_MAX_INQ_ARR_SZ
));
1688 static int check_device_access_params(struct sdebug_dev_info
*devi
,
1689 unsigned long long lba
, unsigned int num
)
1691 if (lba
+ num
> sdebug_capacity
) {
1692 mk_sense_buffer(devi
, ILLEGAL_REQUEST
, ADDR_OUT_OF_RANGE
, 0);
1693 return check_condition_result
;
1695 /* transfer length excessive (tie in to block limits VPD page) */
1696 if (num
> sdebug_store_sectors
) {
1697 mk_sense_buffer(devi
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
1698 return check_condition_result
;
1703 /* Returns number of bytes copied or -1 if error. */
1704 static int do_device_access(struct scsi_cmnd
*scmd
,
1705 struct sdebug_dev_info
*devi
,
1706 unsigned long long lba
, unsigned int num
, int write
)
1709 unsigned long long block
, rest
= 0;
1710 struct scsi_data_buffer
*sdb
;
1711 enum dma_data_direction dir
;
1712 size_t (*func
)(struct scatterlist
*, unsigned int, void *, size_t,
1716 sdb
= scsi_out(scmd
);
1717 dir
= DMA_TO_DEVICE
;
1718 func
= sg_pcopy_to_buffer
;
1720 sdb
= scsi_in(scmd
);
1721 dir
= DMA_FROM_DEVICE
;
1722 func
= sg_pcopy_from_buffer
;
1727 if (!(scsi_bidi_cmnd(scmd
) || scmd
->sc_data_direction
== dir
))
1730 block
= do_div(lba
, sdebug_store_sectors
);
1731 if (block
+ num
> sdebug_store_sectors
)
1732 rest
= block
+ num
- sdebug_store_sectors
;
1734 ret
= func(sdb
->table
.sgl
, sdb
->table
.nents
,
1735 fake_storep
+ (block
* scsi_debug_sector_size
),
1736 (num
- rest
) * scsi_debug_sector_size
, 0);
1737 if (ret
!= (num
- rest
) * scsi_debug_sector_size
)
1741 ret
+= func(sdb
->table
.sgl
, sdb
->table
.nents
,
1742 fake_storep
, rest
* scsi_debug_sector_size
,
1743 (num
- rest
) * scsi_debug_sector_size
);
1749 static __be16
dif_compute_csum(const void *buf
, int len
)
1753 if (scsi_debug_guard
)
1754 csum
= (__force __be16
)ip_compute_csum(buf
, len
);
1756 csum
= cpu_to_be16(crc_t10dif(buf
, len
));
1761 static int dif_verify(struct sd_dif_tuple
*sdt
, const void *data
,
1762 sector_t sector
, u32 ei_lba
)
1764 __be16 csum
= dif_compute_csum(data
, scsi_debug_sector_size
);
1766 if (sdt
->guard_tag
!= csum
) {
1767 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
1769 (unsigned long)sector
,
1770 be16_to_cpu(sdt
->guard_tag
),
1774 if (scsi_debug_dif
== SD_DIF_TYPE1_PROTECTION
&&
1775 be32_to_cpu(sdt
->ref_tag
) != (sector
& 0xffffffff)) {
1776 pr_err("%s: REF check failed on sector %lu\n",
1777 __func__
, (unsigned long)sector
);
1780 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
&&
1781 be32_to_cpu(sdt
->ref_tag
) != ei_lba
) {
1782 pr_err("%s: REF check failed on sector %lu\n",
1783 __func__
, (unsigned long)sector
);
1789 static void dif_copy_prot(struct scsi_cmnd
*SCpnt
, sector_t sector
,
1790 unsigned int sectors
, bool read
)
1792 unsigned int i
, resid
;
1793 struct scatterlist
*psgl
;
1795 const void *dif_store_end
= dif_storep
+ sdebug_store_sectors
;
1797 /* Bytes of protection data to copy into sgl */
1798 resid
= sectors
* sizeof(*dif_storep
);
1800 scsi_for_each_prot_sg(SCpnt
, psgl
, scsi_prot_sg_count(SCpnt
), i
) {
1801 int len
= min(psgl
->length
, resid
);
1802 void *start
= dif_store(sector
);
1805 if (dif_store_end
< start
+ len
)
1806 rest
= start
+ len
- dif_store_end
;
1808 paddr
= kmap_atomic(sg_page(psgl
)) + psgl
->offset
;
1811 memcpy(paddr
, start
, len
- rest
);
1813 memcpy(start
, paddr
, len
- rest
);
1817 memcpy(paddr
+ len
- rest
, dif_storep
, rest
);
1819 memcpy(dif_storep
, paddr
+ len
- rest
, rest
);
1822 sector
+= len
/ sizeof(*dif_storep
);
1824 kunmap_atomic(paddr
);
1828 static int prot_verify_read(struct scsi_cmnd
*SCpnt
, sector_t start_sec
,
1829 unsigned int sectors
, u32 ei_lba
)
1832 struct sd_dif_tuple
*sdt
;
1835 for (i
= 0; i
< sectors
; i
++, ei_lba
++) {
1838 sector
= start_sec
+ i
;
1839 sdt
= dif_store(sector
);
1841 if (sdt
->app_tag
== cpu_to_be16(0xffff))
1844 ret
= dif_verify(sdt
, fake_store(sector
), sector
, ei_lba
);
1851 dif_copy_prot(SCpnt
, start_sec
, sectors
, true);
1857 static int resp_read(struct scsi_cmnd
*SCpnt
, unsigned long long lba
,
1858 unsigned int num
, struct sdebug_dev_info
*devip
,
1861 unsigned long iflags
;
1864 ret
= check_device_access_params(devip
, lba
, num
);
1868 if ((SCSI_DEBUG_OPT_MEDIUM_ERR
& scsi_debug_opts
) &&
1869 (lba
<= (OPT_MEDIUM_ERR_ADDR
+ OPT_MEDIUM_ERR_NUM
- 1)) &&
1870 ((lba
+ num
) > OPT_MEDIUM_ERR_ADDR
)) {
1871 /* claim unrecoverable read error */
1872 mk_sense_buffer(devip
, MEDIUM_ERROR
, UNRECOVERED_READ_ERR
, 0);
1873 /* set info field and valid bit for fixed descriptor */
1874 if (0x70 == (devip
->sense_buff
[0] & 0x7f)) {
1875 devip
->sense_buff
[0] |= 0x80; /* Valid bit */
1876 ret
= (lba
< OPT_MEDIUM_ERR_ADDR
)
1877 ? OPT_MEDIUM_ERR_ADDR
: (int)lba
;
1878 devip
->sense_buff
[3] = (ret
>> 24) & 0xff;
1879 devip
->sense_buff
[4] = (ret
>> 16) & 0xff;
1880 devip
->sense_buff
[5] = (ret
>> 8) & 0xff;
1881 devip
->sense_buff
[6] = ret
& 0xff;
1883 scsi_set_resid(SCpnt
, scsi_bufflen(SCpnt
));
1884 return check_condition_result
;
1888 if (scsi_debug_dix
&& scsi_prot_sg_count(SCpnt
)) {
1889 int prot_ret
= prot_verify_read(SCpnt
, lba
, num
, ei_lba
);
1892 mk_sense_buffer(devip
, ABORTED_COMMAND
, 0x10, prot_ret
);
1893 return illegal_condition_result
;
1897 read_lock_irqsave(&atomic_rw
, iflags
);
1898 ret
= do_device_access(SCpnt
, devip
, lba
, num
, 0);
1899 read_unlock_irqrestore(&atomic_rw
, iflags
);
1901 return DID_ERROR
<< 16;
1903 scsi_in(SCpnt
)->resid
= scsi_bufflen(SCpnt
) - ret
;
1908 void dump_sector(unsigned char *buf
, int len
)
1912 printk(KERN_ERR
">>> Sector Dump <<<\n");
1914 for (i
= 0 ; i
< len
; i
+= 16) {
1915 printk(KERN_ERR
"%04d: ", i
);
1917 for (j
= 0 ; j
< 16 ; j
++) {
1918 unsigned char c
= buf
[i
+j
];
1919 if (c
>= 0x20 && c
< 0x7e)
1920 printk(" %c ", buf
[i
+j
]);
1922 printk("%02x ", buf
[i
+j
]);
1929 static int prot_verify_write(struct scsi_cmnd
*SCpnt
, sector_t start_sec
,
1930 unsigned int sectors
, u32 ei_lba
)
1933 struct sd_dif_tuple
*sdt
;
1934 struct scatterlist
*dsgl
;
1935 struct scatterlist
*psgl
= scsi_prot_sglist(SCpnt
);
1936 void *daddr
, *paddr
;
1937 sector_t sector
= start_sec
;
1940 BUG_ON(scsi_sg_count(SCpnt
) == 0);
1941 BUG_ON(scsi_prot_sg_count(SCpnt
) == 0);
1945 /* For each data page */
1946 scsi_for_each_sg(SCpnt
, dsgl
, scsi_sg_count(SCpnt
), i
) {
1947 daddr
= kmap_atomic(sg_page(dsgl
)) + dsgl
->offset
;
1948 paddr
= kmap_atomic(sg_page(psgl
)) + psgl
->offset
;
1950 /* For each sector-sized chunk in data page */
1951 for (j
= 0; j
< dsgl
->length
; j
+= scsi_debug_sector_size
) {
1953 /* If we're at the end of the current
1954 * protection page advance to the next one
1956 if (ppage_offset
>= psgl
->length
) {
1957 kunmap_atomic(paddr
);
1958 psgl
= sg_next(psgl
);
1959 BUG_ON(psgl
== NULL
);
1960 paddr
= kmap_atomic(sg_page(psgl
))
1965 sdt
= paddr
+ ppage_offset
;
1967 ret
= dif_verify(sdt
, daddr
+ j
, sector
, ei_lba
);
1969 dump_sector(daddr
+ j
, scsi_debug_sector_size
);
1975 ppage_offset
+= sizeof(struct sd_dif_tuple
);
1978 kunmap_atomic(paddr
);
1979 kunmap_atomic(daddr
);
1982 dif_copy_prot(SCpnt
, start_sec
, sectors
, false);
1989 kunmap_atomic(paddr
);
1990 kunmap_atomic(daddr
);
1994 static unsigned long lba_to_map_index(sector_t lba
)
1996 if (scsi_debug_unmap_alignment
) {
1997 lba
+= scsi_debug_unmap_granularity
-
1998 scsi_debug_unmap_alignment
;
2000 do_div(lba
, scsi_debug_unmap_granularity
);
2005 static sector_t
map_index_to_lba(unsigned long index
)
2007 sector_t lba
= index
* scsi_debug_unmap_granularity
;
2009 if (scsi_debug_unmap_alignment
) {
2010 lba
-= scsi_debug_unmap_granularity
-
2011 scsi_debug_unmap_alignment
;
2017 static unsigned int map_state(sector_t lba
, unsigned int *num
)
2020 unsigned int mapped
;
2021 unsigned long index
;
2024 index
= lba_to_map_index(lba
);
2025 mapped
= test_bit(index
, map_storep
);
2028 next
= find_next_zero_bit(map_storep
, map_size
, index
);
2030 next
= find_next_bit(map_storep
, map_size
, index
);
2032 end
= min_t(sector_t
, sdebug_store_sectors
, map_index_to_lba(next
));
2038 static void map_region(sector_t lba
, unsigned int len
)
2040 sector_t end
= lba
+ len
;
2043 unsigned long index
= lba_to_map_index(lba
);
2045 if (index
< map_size
)
2046 set_bit(index
, map_storep
);
2048 lba
= map_index_to_lba(index
+ 1);
2052 static void unmap_region(sector_t lba
, unsigned int len
)
2054 sector_t end
= lba
+ len
;
2057 unsigned long index
= lba_to_map_index(lba
);
2059 if (lba
== map_index_to_lba(index
) &&
2060 lba
+ scsi_debug_unmap_granularity
<= end
&&
2062 clear_bit(index
, map_storep
);
2063 if (scsi_debug_lbprz
) {
2064 memset(fake_storep
+
2065 lba
* scsi_debug_sector_size
, 0,
2066 scsi_debug_sector_size
*
2067 scsi_debug_unmap_granularity
);
2070 memset(dif_storep
+ lba
, 0xff,
2071 sizeof(*dif_storep
) *
2072 scsi_debug_unmap_granularity
);
2075 lba
= map_index_to_lba(index
+ 1);
2079 static int resp_write(struct scsi_cmnd
*SCpnt
, unsigned long long lba
,
2080 unsigned int num
, struct sdebug_dev_info
*devip
,
2083 unsigned long iflags
;
2086 ret
= check_device_access_params(devip
, lba
, num
);
2091 if (scsi_debug_dix
&& scsi_prot_sg_count(SCpnt
)) {
2092 int prot_ret
= prot_verify_write(SCpnt
, lba
, num
, ei_lba
);
2095 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, 0x10, prot_ret
);
2096 return illegal_condition_result
;
2100 write_lock_irqsave(&atomic_rw
, iflags
);
2101 ret
= do_device_access(SCpnt
, devip
, lba
, num
, 1);
2102 if (scsi_debug_lbp())
2103 map_region(lba
, num
);
2104 write_unlock_irqrestore(&atomic_rw
, iflags
);
2106 return (DID_ERROR
<< 16);
2107 else if ((ret
< (num
* scsi_debug_sector_size
)) &&
2108 (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
))
2109 printk(KERN_INFO
"scsi_debug: write: cdb indicated=%u, "
2110 " IO sent=%d bytes\n", num
* scsi_debug_sector_size
, ret
);
2115 static int resp_write_same(struct scsi_cmnd
*scmd
, unsigned long long lba
,
2116 unsigned int num
, struct sdebug_dev_info
*devip
,
2117 u32 ei_lba
, unsigned int unmap
)
2119 unsigned long iflags
;
2120 unsigned long long i
;
2123 ret
= check_device_access_params(devip
, lba
, num
);
2127 if (num
> scsi_debug_write_same_length
) {
2128 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
2130 return check_condition_result
;
2133 write_lock_irqsave(&atomic_rw
, iflags
);
2135 if (unmap
&& scsi_debug_lbp()) {
2136 unmap_region(lba
, num
);
2140 /* Else fetch one logical block */
2141 ret
= fetch_to_dev_buffer(scmd
,
2142 fake_storep
+ (lba
* scsi_debug_sector_size
),
2143 scsi_debug_sector_size
);
2146 write_unlock_irqrestore(&atomic_rw
, iflags
);
2147 return (DID_ERROR
<< 16);
2148 } else if ((ret
< (num
* scsi_debug_sector_size
)) &&
2149 (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
))
2150 printk(KERN_INFO
"scsi_debug: write same: cdb indicated=%u, "
2151 " IO sent=%d bytes\n", num
* scsi_debug_sector_size
, ret
);
2153 /* Copy first sector to remaining blocks */
2154 for (i
= 1 ; i
< num
; i
++)
2155 memcpy(fake_storep
+ ((lba
+ i
) * scsi_debug_sector_size
),
2156 fake_storep
+ (lba
* scsi_debug_sector_size
),
2157 scsi_debug_sector_size
);
2159 if (scsi_debug_lbp())
2160 map_region(lba
, num
);
2162 write_unlock_irqrestore(&atomic_rw
, iflags
);
2167 struct unmap_block_desc
{
2173 static int resp_unmap(struct scsi_cmnd
* scmd
, struct sdebug_dev_info
* devip
)
2176 struct unmap_block_desc
*desc
;
2177 unsigned int i
, payload_len
, descriptors
;
2180 ret
= check_readiness(scmd
, 1, devip
);
2184 payload_len
= get_unaligned_be16(&scmd
->cmnd
[7]);
2185 BUG_ON(scsi_bufflen(scmd
) != payload_len
);
2187 descriptors
= (payload_len
- 8) / 16;
2189 buf
= kmalloc(scsi_bufflen(scmd
), GFP_ATOMIC
);
2191 return check_condition_result
;
2193 scsi_sg_copy_to_buffer(scmd
, buf
, scsi_bufflen(scmd
));
2195 BUG_ON(get_unaligned_be16(&buf
[0]) != payload_len
- 2);
2196 BUG_ON(get_unaligned_be16(&buf
[2]) != descriptors
* 16);
2198 desc
= (void *)&buf
[8];
2200 for (i
= 0 ; i
< descriptors
; i
++) {
2201 unsigned long long lba
= get_unaligned_be64(&desc
[i
].lba
);
2202 unsigned int num
= get_unaligned_be32(&desc
[i
].blocks
);
2204 ret
= check_device_access_params(devip
, lba
, num
);
2208 unmap_region(lba
, num
);
2219 #define SDEBUG_GET_LBA_STATUS_LEN 32
2221 static int resp_get_lba_status(struct scsi_cmnd
* scmd
,
2222 struct sdebug_dev_info
* devip
)
2224 unsigned long long lba
;
2225 unsigned int alloc_len
, mapped
, num
;
2226 unsigned char arr
[SDEBUG_GET_LBA_STATUS_LEN
];
2229 ret
= check_readiness(scmd
, 1, devip
);
2233 lba
= get_unaligned_be64(&scmd
->cmnd
[2]);
2234 alloc_len
= get_unaligned_be32(&scmd
->cmnd
[10]);
2239 ret
= check_device_access_params(devip
, lba
, 1);
2243 mapped
= map_state(lba
, &num
);
2245 memset(arr
, 0, SDEBUG_GET_LBA_STATUS_LEN
);
2246 put_unaligned_be32(20, &arr
[0]); /* Parameter Data Length */
2247 put_unaligned_be64(lba
, &arr
[8]); /* LBA */
2248 put_unaligned_be32(num
, &arr
[16]); /* Number of blocks */
2249 arr
[20] = !mapped
; /* mapped = 0, unmapped = 1 */
2251 return fill_from_dev_buffer(scmd
, arr
, SDEBUG_GET_LBA_STATUS_LEN
);
2254 #define SDEBUG_RLUN_ARR_SZ 256
2256 static int resp_report_luns(struct scsi_cmnd
* scp
,
2257 struct sdebug_dev_info
* devip
)
2259 unsigned int alloc_len
;
2260 int lun_cnt
, i
, upper
, num
, n
, wlun
, lun
;
2261 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
2262 int select_report
= (int)cmd
[2];
2263 struct scsi_lun
*one_lun
;
2264 unsigned char arr
[SDEBUG_RLUN_ARR_SZ
];
2265 unsigned char * max_addr
;
2267 alloc_len
= cmd
[9] + (cmd
[8] << 8) + (cmd
[7] << 16) + (cmd
[6] << 24);
2268 if ((alloc_len
< 4) || (select_report
> 2)) {
2269 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
2271 return check_condition_result
;
2273 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2274 memset(arr
, 0, SDEBUG_RLUN_ARR_SZ
);
2275 lun_cnt
= scsi_debug_max_luns
;
2276 if (1 == select_report
)
2278 else if (scsi_debug_no_lun_0
&& (lun_cnt
> 0))
2280 wlun
= (select_report
> 0) ? 1 : 0;
2281 num
= lun_cnt
+ wlun
;
2282 arr
[2] = ((sizeof(struct scsi_lun
) * num
) >> 8) & 0xff;
2283 arr
[3] = (sizeof(struct scsi_lun
) * num
) & 0xff;
2284 n
= min((int)((SDEBUG_RLUN_ARR_SZ
- 8) /
2285 sizeof(struct scsi_lun
)), num
);
2290 one_lun
= (struct scsi_lun
*) &arr
[8];
2291 max_addr
= arr
+ SDEBUG_RLUN_ARR_SZ
;
2292 for (i
= 0, lun
= (scsi_debug_no_lun_0
? 1 : 0);
2293 ((i
< lun_cnt
) && ((unsigned char *)(one_lun
+ i
) < max_addr
));
2295 upper
= (lun
>> 8) & 0x3f;
2297 one_lun
[i
].scsi_lun
[0] =
2298 (upper
| (SAM2_LUN_ADDRESS_METHOD
<< 6));
2299 one_lun
[i
].scsi_lun
[1] = lun
& 0xff;
2302 one_lun
[i
].scsi_lun
[0] = (SAM2_WLUN_REPORT_LUNS
>> 8) & 0xff;
2303 one_lun
[i
].scsi_lun
[1] = SAM2_WLUN_REPORT_LUNS
& 0xff;
2306 alloc_len
= (unsigned char *)(one_lun
+ i
) - arr
;
2307 return fill_from_dev_buffer(scp
, arr
,
2308 min((int)alloc_len
, SDEBUG_RLUN_ARR_SZ
));
2311 static int resp_xdwriteread(struct scsi_cmnd
*scp
, unsigned long long lba
,
2312 unsigned int num
, struct sdebug_dev_info
*devip
)
2315 unsigned char *kaddr
, *buf
;
2316 unsigned int offset
;
2317 struct scatterlist
*sg
;
2318 struct scsi_data_buffer
*sdb
= scsi_in(scp
);
2320 /* better not to use temporary buffer. */
2321 buf
= kmalloc(scsi_bufflen(scp
), GFP_ATOMIC
);
2323 mk_sense_buffer(devip
, NOT_READY
,
2324 LOGICAL_UNIT_COMMUNICATION_FAILURE
, 0);
2325 return check_condition_result
;
2328 scsi_sg_copy_to_buffer(scp
, buf
, scsi_bufflen(scp
));
2331 for_each_sg(sdb
->table
.sgl
, sg
, sdb
->table
.nents
, i
) {
2332 kaddr
= (unsigned char *)kmap_atomic(sg_page(sg
));
2336 for (j
= 0; j
< sg
->length
; j
++)
2337 *(kaddr
+ sg
->offset
+ j
) ^= *(buf
+ offset
+ j
);
2339 offset
+= sg
->length
;
2340 kunmap_atomic(kaddr
);
2349 /* When timer goes off this function is called. */
2350 static void timer_intr_handler(unsigned long indx
)
2352 struct sdebug_queued_cmd
* sqcp
;
2353 unsigned long iflags
;
2355 if (indx
>= scsi_debug_max_queue
) {
2356 printk(KERN_ERR
"scsi_debug:timer_intr_handler: indx too "
2360 spin_lock_irqsave(&queued_arr_lock
, iflags
);
2361 sqcp
= &queued_arr
[(int)indx
];
2362 if (! sqcp
->in_use
) {
2363 printk(KERN_ERR
"scsi_debug:timer_intr_handler: Unexpected "
2365 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
2369 if (sqcp
->done_funct
) {
2370 sqcp
->a_cmnd
->result
= sqcp
->scsi_result
;
2371 sqcp
->done_funct(sqcp
->a_cmnd
); /* callback to mid level */
2373 sqcp
->done_funct
= NULL
;
2374 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
2378 static struct sdebug_dev_info
*
2379 sdebug_device_create(struct sdebug_host_info
*sdbg_host
, gfp_t flags
)
2381 struct sdebug_dev_info
*devip
;
2383 devip
= kzalloc(sizeof(*devip
), flags
);
2385 devip
->sdbg_host
= sdbg_host
;
2386 list_add_tail(&devip
->dev_list
, &sdbg_host
->dev_info_list
);
2391 static struct sdebug_dev_info
* devInfoReg(struct scsi_device
* sdev
)
2393 struct sdebug_host_info
* sdbg_host
;
2394 struct sdebug_dev_info
* open_devip
= NULL
;
2395 struct sdebug_dev_info
* devip
=
2396 (struct sdebug_dev_info
*)sdev
->hostdata
;
2400 sdbg_host
= *(struct sdebug_host_info
**)shost_priv(sdev
->host
);
2402 printk(KERN_ERR
"Host info NULL\n");
2405 list_for_each_entry(devip
, &sdbg_host
->dev_info_list
, dev_list
) {
2406 if ((devip
->used
) && (devip
->channel
== sdev
->channel
) &&
2407 (devip
->target
== sdev
->id
) &&
2408 (devip
->lun
== sdev
->lun
))
2411 if ((!devip
->used
) && (!open_devip
))
2415 if (!open_devip
) { /* try and make a new one */
2416 open_devip
= sdebug_device_create(sdbg_host
, GFP_ATOMIC
);
2418 printk(KERN_ERR
"%s: out of memory at line %d\n",
2419 __func__
, __LINE__
);
2424 open_devip
->channel
= sdev
->channel
;
2425 open_devip
->target
= sdev
->id
;
2426 open_devip
->lun
= sdev
->lun
;
2427 open_devip
->sdbg_host
= sdbg_host
;
2428 open_devip
->reset
= 1;
2429 open_devip
->used
= 1;
2430 memset(open_devip
->sense_buff
, 0, SDEBUG_SENSE_LEN
);
2431 if (scsi_debug_dsense
)
2432 open_devip
->sense_buff
[0] = 0x72;
2434 open_devip
->sense_buff
[0] = 0x70;
2435 open_devip
->sense_buff
[7] = 0xa;
2437 if (sdev
->lun
== SAM2_WLUN_REPORT_LUNS
)
2438 open_devip
->wlun
= SAM2_WLUN_REPORT_LUNS
& 0xff;
2443 static int scsi_debug_slave_alloc(struct scsi_device
*sdp
)
2445 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
2446 printk(KERN_INFO
"scsi_debug: slave_alloc <%u %u %u %u>\n",
2447 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
2448 queue_flag_set_unlocked(QUEUE_FLAG_BIDI
, sdp
->request_queue
);
2452 static int scsi_debug_slave_configure(struct scsi_device
*sdp
)
2454 struct sdebug_dev_info
*devip
;
2456 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
2457 printk(KERN_INFO
"scsi_debug: slave_configure <%u %u %u %u>\n",
2458 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
2459 if (sdp
->host
->max_cmd_len
!= SCSI_DEBUG_MAX_CMD_LEN
)
2460 sdp
->host
->max_cmd_len
= SCSI_DEBUG_MAX_CMD_LEN
;
2461 devip
= devInfoReg(sdp
);
2463 return 1; /* no resources, will be marked offline */
2464 sdp
->hostdata
= devip
;
2465 if (sdp
->host
->cmd_per_lun
)
2466 scsi_adjust_queue_depth(sdp
, SDEBUG_TAGGED_QUEUING
,
2467 sdp
->host
->cmd_per_lun
);
2468 blk_queue_max_segment_size(sdp
->request_queue
, 256 * 1024);
2469 if (scsi_debug_no_uld
)
2470 sdp
->no_uld_attach
= 1;
2474 static void scsi_debug_slave_destroy(struct scsi_device
*sdp
)
2476 struct sdebug_dev_info
*devip
=
2477 (struct sdebug_dev_info
*)sdp
->hostdata
;
2479 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
2480 printk(KERN_INFO
"scsi_debug: slave_destroy <%u %u %u %u>\n",
2481 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
2483 /* make this slot available for re-use */
2485 sdp
->hostdata
= NULL
;
2489 /* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */
2490 static int stop_queued_cmnd(struct scsi_cmnd
*cmnd
)
2492 unsigned long iflags
;
2494 struct sdebug_queued_cmd
*sqcp
;
2496 spin_lock_irqsave(&queued_arr_lock
, iflags
);
2497 for (k
= 0; k
< scsi_debug_max_queue
; ++k
) {
2498 sqcp
= &queued_arr
[k
];
2499 if (sqcp
->in_use
&& (cmnd
== sqcp
->a_cmnd
)) {
2500 del_timer_sync(&sqcp
->cmnd_timer
);
2502 sqcp
->a_cmnd
= NULL
;
2506 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
2507 return (k
< scsi_debug_max_queue
) ? 1 : 0;
2510 /* Deletes (stops) timers of all queued commands */
2511 static void stop_all_queued(void)
2513 unsigned long iflags
;
2515 struct sdebug_queued_cmd
*sqcp
;
2517 spin_lock_irqsave(&queued_arr_lock
, iflags
);
2518 for (k
= 0; k
< scsi_debug_max_queue
; ++k
) {
2519 sqcp
= &queued_arr
[k
];
2520 if (sqcp
->in_use
&& sqcp
->a_cmnd
) {
2521 del_timer_sync(&sqcp
->cmnd_timer
);
2523 sqcp
->a_cmnd
= NULL
;
2526 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
2529 static int scsi_debug_abort(struct scsi_cmnd
* SCpnt
)
2531 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
2532 printk(KERN_INFO
"scsi_debug: abort\n");
2534 stop_queued_cmnd(SCpnt
);
2538 static int scsi_debug_biosparam(struct scsi_device
*sdev
,
2539 struct block_device
* bdev
, sector_t capacity
, int *info
)
2544 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
2545 printk(KERN_INFO
"scsi_debug: biosparam\n");
2546 buf
= scsi_bios_ptable(bdev
);
2548 res
= scsi_partsize(buf
, capacity
,
2549 &info
[2], &info
[0], &info
[1]);
2554 info
[0] = sdebug_heads
;
2555 info
[1] = sdebug_sectors_per
;
2556 info
[2] = sdebug_cylinders_per
;
2560 static int scsi_debug_device_reset(struct scsi_cmnd
* SCpnt
)
2562 struct sdebug_dev_info
* devip
;
2564 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
2565 printk(KERN_INFO
"scsi_debug: device_reset\n");
2568 devip
= devInfoReg(SCpnt
->device
);
2575 static int scsi_debug_bus_reset(struct scsi_cmnd
* SCpnt
)
2577 struct sdebug_host_info
*sdbg_host
;
2578 struct sdebug_dev_info
* dev_info
;
2579 struct scsi_device
* sdp
;
2580 struct Scsi_Host
* hp
;
2582 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
2583 printk(KERN_INFO
"scsi_debug: bus_reset\n");
2585 if (SCpnt
&& ((sdp
= SCpnt
->device
)) && ((hp
= sdp
->host
))) {
2586 sdbg_host
= *(struct sdebug_host_info
**)shost_priv(hp
);
2588 list_for_each_entry(dev_info
,
2589 &sdbg_host
->dev_info_list
,
2591 dev_info
->reset
= 1;
2597 static int scsi_debug_host_reset(struct scsi_cmnd
* SCpnt
)
2599 struct sdebug_host_info
* sdbg_host
;
2600 struct sdebug_dev_info
* dev_info
;
2602 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
2603 printk(KERN_INFO
"scsi_debug: host_reset\n");
2605 spin_lock(&sdebug_host_list_lock
);
2606 list_for_each_entry(sdbg_host
, &sdebug_host_list
, host_list
) {
2607 list_for_each_entry(dev_info
, &sdbg_host
->dev_info_list
,
2609 dev_info
->reset
= 1;
2611 spin_unlock(&sdebug_host_list_lock
);
2616 /* Initializes timers in queued array */
2617 static void __init
init_all_queued(void)
2619 unsigned long iflags
;
2621 struct sdebug_queued_cmd
* sqcp
;
2623 spin_lock_irqsave(&queued_arr_lock
, iflags
);
2624 for (k
= 0; k
< scsi_debug_max_queue
; ++k
) {
2625 sqcp
= &queued_arr
[k
];
2626 init_timer(&sqcp
->cmnd_timer
);
2628 sqcp
->a_cmnd
= NULL
;
2630 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
2633 static void __init
sdebug_build_parts(unsigned char *ramp
,
2634 unsigned long store_size
)
2636 struct partition
* pp
;
2637 int starts
[SDEBUG_MAX_PARTS
+ 2];
2638 int sectors_per_part
, num_sectors
, k
;
2639 int heads_by_sects
, start_sec
, end_sec
;
2641 /* assume partition table already zeroed */
2642 if ((scsi_debug_num_parts
< 1) || (store_size
< 1048576))
2644 if (scsi_debug_num_parts
> SDEBUG_MAX_PARTS
) {
2645 scsi_debug_num_parts
= SDEBUG_MAX_PARTS
;
2646 printk(KERN_WARNING
"scsi_debug:build_parts: reducing "
2647 "partitions to %d\n", SDEBUG_MAX_PARTS
);
2649 num_sectors
= (int)sdebug_store_sectors
;
2650 sectors_per_part
= (num_sectors
- sdebug_sectors_per
)
2651 / scsi_debug_num_parts
;
2652 heads_by_sects
= sdebug_heads
* sdebug_sectors_per
;
2653 starts
[0] = sdebug_sectors_per
;
2654 for (k
= 1; k
< scsi_debug_num_parts
; ++k
)
2655 starts
[k
] = ((k
* sectors_per_part
) / heads_by_sects
)
2657 starts
[scsi_debug_num_parts
] = num_sectors
;
2658 starts
[scsi_debug_num_parts
+ 1] = 0;
2660 ramp
[510] = 0x55; /* magic partition markings */
2662 pp
= (struct partition
*)(ramp
+ 0x1be);
2663 for (k
= 0; starts
[k
+ 1]; ++k
, ++pp
) {
2664 start_sec
= starts
[k
];
2665 end_sec
= starts
[k
+ 1] - 1;
2668 pp
->cyl
= start_sec
/ heads_by_sects
;
2669 pp
->head
= (start_sec
- (pp
->cyl
* heads_by_sects
))
2670 / sdebug_sectors_per
;
2671 pp
->sector
= (start_sec
% sdebug_sectors_per
) + 1;
2673 pp
->end_cyl
= end_sec
/ heads_by_sects
;
2674 pp
->end_head
= (end_sec
- (pp
->end_cyl
* heads_by_sects
))
2675 / sdebug_sectors_per
;
2676 pp
->end_sector
= (end_sec
% sdebug_sectors_per
) + 1;
2678 pp
->start_sect
= cpu_to_le32(start_sec
);
2679 pp
->nr_sects
= cpu_to_le32(end_sec
- start_sec
+ 1);
2680 pp
->sys_ind
= 0x83; /* plain Linux partition */
2684 static int schedule_resp(struct scsi_cmnd
* cmnd
,
2685 struct sdebug_dev_info
* devip
,
2686 done_funct_t done
, int scsi_result
, int delta_jiff
)
2688 if ((SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
) && cmnd
) {
2690 struct scsi_device
* sdp
= cmnd
->device
;
2692 printk(KERN_INFO
"scsi_debug: <%u %u %u %u> "
2693 "non-zero result=0x%x\n", sdp
->host
->host_no
,
2694 sdp
->channel
, sdp
->id
, sdp
->lun
, scsi_result
);
2697 if (cmnd
&& devip
) {
2698 /* simulate autosense by this driver */
2699 if (SAM_STAT_CHECK_CONDITION
== (scsi_result
& 0xff))
2700 memcpy(cmnd
->sense_buffer
, devip
->sense_buff
,
2701 (SCSI_SENSE_BUFFERSIZE
> SDEBUG_SENSE_LEN
) ?
2702 SDEBUG_SENSE_LEN
: SCSI_SENSE_BUFFERSIZE
);
2704 if (delta_jiff
<= 0) {
2706 cmnd
->result
= scsi_result
;
2711 unsigned long iflags
;
2713 struct sdebug_queued_cmd
* sqcp
= NULL
;
2715 spin_lock_irqsave(&queued_arr_lock
, iflags
);
2716 for (k
= 0; k
< scsi_debug_max_queue
; ++k
) {
2717 sqcp
= &queued_arr
[k
];
2721 if (k
>= scsi_debug_max_queue
) {
2722 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
2723 printk(KERN_WARNING
"scsi_debug: can_queue exceeded\n");
2724 return 1; /* report busy to mid level */
2727 sqcp
->a_cmnd
= cmnd
;
2728 sqcp
->scsi_result
= scsi_result
;
2729 sqcp
->done_funct
= done
;
2730 sqcp
->cmnd_timer
.function
= timer_intr_handler
;
2731 sqcp
->cmnd_timer
.data
= k
;
2732 sqcp
->cmnd_timer
.expires
= jiffies
+ delta_jiff
;
2733 add_timer(&sqcp
->cmnd_timer
);
2734 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
2740 /* Note: The following macros create attribute files in the
2741 /sys/module/scsi_debug/parameters directory. Unfortunately this
2742 driver is unaware of a change and cannot trigger auxiliary actions
2743 as it can when the corresponding attribute in the
2744 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2746 module_param_named(add_host
, scsi_debug_add_host
, int, S_IRUGO
| S_IWUSR
);
2747 module_param_named(ato
, scsi_debug_ato
, int, S_IRUGO
);
2748 module_param_named(delay
, scsi_debug_delay
, int, S_IRUGO
| S_IWUSR
);
2749 module_param_named(dev_size_mb
, scsi_debug_dev_size_mb
, int, S_IRUGO
);
2750 module_param_named(dif
, scsi_debug_dif
, int, S_IRUGO
);
2751 module_param_named(dix
, scsi_debug_dix
, int, S_IRUGO
);
2752 module_param_named(dsense
, scsi_debug_dsense
, int, S_IRUGO
| S_IWUSR
);
2753 module_param_named(every_nth
, scsi_debug_every_nth
, int, S_IRUGO
| S_IWUSR
);
2754 module_param_named(fake_rw
, scsi_debug_fake_rw
, int, S_IRUGO
| S_IWUSR
);
2755 module_param_named(guard
, scsi_debug_guard
, uint
, S_IRUGO
);
2756 module_param_named(lbpu
, scsi_debug_lbpu
, int, S_IRUGO
);
2757 module_param_named(lbpws
, scsi_debug_lbpws
, int, S_IRUGO
);
2758 module_param_named(lbpws10
, scsi_debug_lbpws10
, int, S_IRUGO
);
2759 module_param_named(lbprz
, scsi_debug_lbprz
, int, S_IRUGO
);
2760 module_param_named(lowest_aligned
, scsi_debug_lowest_aligned
, int, S_IRUGO
);
2761 module_param_named(max_luns
, scsi_debug_max_luns
, int, S_IRUGO
| S_IWUSR
);
2762 module_param_named(max_queue
, scsi_debug_max_queue
, int, S_IRUGO
| S_IWUSR
);
2763 module_param_named(no_lun_0
, scsi_debug_no_lun_0
, int, S_IRUGO
| S_IWUSR
);
2764 module_param_named(no_uld
, scsi_debug_no_uld
, int, S_IRUGO
);
2765 module_param_named(num_parts
, scsi_debug_num_parts
, int, S_IRUGO
);
2766 module_param_named(num_tgts
, scsi_debug_num_tgts
, int, S_IRUGO
| S_IWUSR
);
2767 module_param_named(opt_blks
, scsi_debug_opt_blks
, int, S_IRUGO
);
2768 module_param_named(opts
, scsi_debug_opts
, int, S_IRUGO
| S_IWUSR
);
2769 module_param_named(physblk_exp
, scsi_debug_physblk_exp
, int, S_IRUGO
);
2770 module_param_named(ptype
, scsi_debug_ptype
, int, S_IRUGO
| S_IWUSR
);
2771 module_param_named(removable
, scsi_debug_removable
, bool, S_IRUGO
| S_IWUSR
);
2772 module_param_named(scsi_level
, scsi_debug_scsi_level
, int, S_IRUGO
);
2773 module_param_named(sector_size
, scsi_debug_sector_size
, int, S_IRUGO
);
2774 module_param_named(unmap_alignment
, scsi_debug_unmap_alignment
, int, S_IRUGO
);
2775 module_param_named(unmap_granularity
, scsi_debug_unmap_granularity
, int, S_IRUGO
);
2776 module_param_named(unmap_max_blocks
, scsi_debug_unmap_max_blocks
, int, S_IRUGO
);
2777 module_param_named(unmap_max_desc
, scsi_debug_unmap_max_desc
, int, S_IRUGO
);
2778 module_param_named(virtual_gb
, scsi_debug_virtual_gb
, int, S_IRUGO
| S_IWUSR
);
2779 module_param_named(vpd_use_hostno
, scsi_debug_vpd_use_hostno
, int,
2781 module_param_named(write_same_length
, scsi_debug_write_same_length
, int,
2784 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2785 MODULE_DESCRIPTION("SCSI debug adapter driver");
2786 MODULE_LICENSE("GPL");
2787 MODULE_VERSION(SCSI_DEBUG_VERSION
);
2789 MODULE_PARM_DESC(add_host
, "0..127 hosts allowed(def=1)");
2790 MODULE_PARM_DESC(ato
, "application tag ownership: 0=disk 1=host (def=1)");
2791 MODULE_PARM_DESC(delay
, "# of jiffies to delay response(def=1)");
2792 MODULE_PARM_DESC(dev_size_mb
, "size in MB of ram shared by devs(def=8)");
2793 MODULE_PARM_DESC(dif
, "data integrity field type: 0-3 (def=0)");
2794 MODULE_PARM_DESC(dix
, "data integrity extensions mask (def=0)");
2795 MODULE_PARM_DESC(dsense
, "use descriptor sense format(def=0 -> fixed)");
2796 MODULE_PARM_DESC(every_nth
, "timeout every nth command(def=0)");
2797 MODULE_PARM_DESC(fake_rw
, "fake reads/writes instead of copying (def=0)");
2798 MODULE_PARM_DESC(guard
, "protection checksum: 0=crc, 1=ip (def=0)");
2799 MODULE_PARM_DESC(lbpu
, "enable LBP, support UNMAP command (def=0)");
2800 MODULE_PARM_DESC(lbpws
, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2801 MODULE_PARM_DESC(lbpws10
, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2802 MODULE_PARM_DESC(lbprz
, "unmapped blocks return 0 on read (def=1)");
2803 MODULE_PARM_DESC(lowest_aligned
, "lowest aligned lba (def=0)");
2804 MODULE_PARM_DESC(max_luns
, "number of LUNs per target to simulate(def=1)");
2805 MODULE_PARM_DESC(max_queue
, "max number of queued commands (1 to 255(def))");
2806 MODULE_PARM_DESC(no_lun_0
, "no LU number 0 (def=0 -> have lun 0)");
2807 MODULE_PARM_DESC(no_uld
, "stop ULD (e.g. sd driver) attaching (def=0))");
2808 MODULE_PARM_DESC(num_parts
, "number of partitions(def=0)");
2809 MODULE_PARM_DESC(num_tgts
, "number of targets per host to simulate(def=1)");
2810 MODULE_PARM_DESC(opt_blks
, "optimal transfer length in block (def=64)");
2811 MODULE_PARM_DESC(opts
, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2812 MODULE_PARM_DESC(physblk_exp
, "physical block exponent (def=0)");
2813 MODULE_PARM_DESC(ptype
, "SCSI peripheral type(def=0[disk])");
2814 MODULE_PARM_DESC(removable
, "claim to have removable media (def=0)");
2815 MODULE_PARM_DESC(scsi_level
, "SCSI level to simulate(def=5[SPC-3])");
2816 MODULE_PARM_DESC(sector_size
, "logical block size in bytes (def=512)");
2817 MODULE_PARM_DESC(unmap_alignment
, "lowest aligned thin provisioning lba (def=0)");
2818 MODULE_PARM_DESC(unmap_granularity
, "thin provisioning granularity in blocks (def=1)");
2819 MODULE_PARM_DESC(unmap_max_blocks
, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
2820 MODULE_PARM_DESC(unmap_max_desc
, "max # of ranges that can be unmapped in one cmd (def=256)");
2821 MODULE_PARM_DESC(virtual_gb
, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2822 MODULE_PARM_DESC(vpd_use_hostno
, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2823 MODULE_PARM_DESC(write_same_length
, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
2825 static char sdebug_info
[256];
2827 static const char * scsi_debug_info(struct Scsi_Host
* shp
)
2829 sprintf(sdebug_info
, "scsi_debug, version %s [%s], "
2830 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION
,
2831 scsi_debug_version_date
, scsi_debug_dev_size_mb
,
2836 /* scsi_debug_proc_info
2837 * Used if the driver currently has no own support for /proc/scsi
2839 static int scsi_debug_write_info(struct Scsi_Host
*host
, char *buffer
, int length
)
2843 int minLen
= length
> 15 ? 15 : length
;
2845 if (!capable(CAP_SYS_ADMIN
) || !capable(CAP_SYS_RAWIO
))
2847 memcpy(arr
, buffer
, minLen
);
2849 if (1 != sscanf(arr
, "%d", &opts
))
2851 scsi_debug_opts
= opts
;
2852 if (scsi_debug_every_nth
!= 0)
2853 scsi_debug_cmnd_count
= 0;
2857 static int scsi_debug_show_info(struct seq_file
*m
, struct Scsi_Host
*host
)
2859 seq_printf(m
, "scsi_debug adapter driver, version "
2861 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2862 "every_nth=%d(curr:%d)\n"
2863 "delay=%d, max_luns=%d, scsi_level=%d\n"
2864 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2865 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2866 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2867 SCSI_DEBUG_VERSION
, scsi_debug_version_date
, scsi_debug_num_tgts
,
2868 scsi_debug_dev_size_mb
, scsi_debug_opts
, scsi_debug_every_nth
,
2869 scsi_debug_cmnd_count
, scsi_debug_delay
,
2870 scsi_debug_max_luns
, scsi_debug_scsi_level
,
2871 scsi_debug_sector_size
, sdebug_cylinders_per
, sdebug_heads
,
2872 sdebug_sectors_per
, num_aborts
, num_dev_resets
, num_bus_resets
,
2873 num_host_resets
, dix_reads
, dix_writes
, dif_errors
);
2877 static ssize_t
delay_show(struct device_driver
*ddp
, char *buf
)
2879 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_delay
);
2882 static ssize_t
delay_store(struct device_driver
*ddp
, const char *buf
,
2888 if (1 == sscanf(buf
, "%10s", work
)) {
2889 if ((1 == sscanf(work
, "%d", &delay
)) && (delay
>= 0)) {
2890 scsi_debug_delay
= delay
;
2896 static DRIVER_ATTR_RW(delay
);
2898 static ssize_t
opts_show(struct device_driver
*ddp
, char *buf
)
2900 return scnprintf(buf
, PAGE_SIZE
, "0x%x\n", scsi_debug_opts
);
2903 static ssize_t
opts_store(struct device_driver
*ddp
, const char *buf
,
2909 if (1 == sscanf(buf
, "%10s", work
)) {
2910 if (0 == strnicmp(work
,"0x", 2)) {
2911 if (1 == sscanf(&work
[2], "%x", &opts
))
2914 if (1 == sscanf(work
, "%d", &opts
))
2920 scsi_debug_opts
= opts
;
2921 scsi_debug_cmnd_count
= 0;
2924 static DRIVER_ATTR_RW(opts
);
2926 static ssize_t
ptype_show(struct device_driver
*ddp
, char *buf
)
2928 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_ptype
);
2930 static ssize_t
ptype_store(struct device_driver
*ddp
, const char *buf
,
2935 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
2936 scsi_debug_ptype
= n
;
2941 static DRIVER_ATTR_RW(ptype
);
2943 static ssize_t
dsense_show(struct device_driver
*ddp
, char *buf
)
2945 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_dsense
);
2947 static ssize_t
dsense_store(struct device_driver
*ddp
, const char *buf
,
2952 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
2953 scsi_debug_dsense
= n
;
2958 static DRIVER_ATTR_RW(dsense
);
2960 static ssize_t
fake_rw_show(struct device_driver
*ddp
, char *buf
)
2962 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_fake_rw
);
2964 static ssize_t
fake_rw_store(struct device_driver
*ddp
, const char *buf
,
2969 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
2970 scsi_debug_fake_rw
= n
;
2975 static DRIVER_ATTR_RW(fake_rw
);
2977 static ssize_t
no_lun_0_show(struct device_driver
*ddp
, char *buf
)
2979 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_no_lun_0
);
2981 static ssize_t
no_lun_0_store(struct device_driver
*ddp
, const char *buf
,
2986 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
2987 scsi_debug_no_lun_0
= n
;
2992 static DRIVER_ATTR_RW(no_lun_0
);
2994 static ssize_t
num_tgts_show(struct device_driver
*ddp
, char *buf
)
2996 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_num_tgts
);
2998 static ssize_t
num_tgts_store(struct device_driver
*ddp
, const char *buf
,
3003 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
3004 scsi_debug_num_tgts
= n
;
3005 sdebug_max_tgts_luns();
3010 static DRIVER_ATTR_RW(num_tgts
);
3012 static ssize_t
dev_size_mb_show(struct device_driver
*ddp
, char *buf
)
3014 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_dev_size_mb
);
3016 static DRIVER_ATTR_RO(dev_size_mb
);
3018 static ssize_t
num_parts_show(struct device_driver
*ddp
, char *buf
)
3020 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_num_parts
);
3022 static DRIVER_ATTR_RO(num_parts
);
3024 static ssize_t
every_nth_show(struct device_driver
*ddp
, char *buf
)
3026 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_every_nth
);
3028 static ssize_t
every_nth_store(struct device_driver
*ddp
, const char *buf
,
3033 if ((count
> 0) && (1 == sscanf(buf
, "%d", &nth
))) {
3034 scsi_debug_every_nth
= nth
;
3035 scsi_debug_cmnd_count
= 0;
3040 static DRIVER_ATTR_RW(every_nth
);
3042 static ssize_t
max_luns_show(struct device_driver
*ddp
, char *buf
)
3044 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_max_luns
);
3046 static ssize_t
max_luns_store(struct device_driver
*ddp
, const char *buf
,
3051 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
3052 scsi_debug_max_luns
= n
;
3053 sdebug_max_tgts_luns();
3058 static DRIVER_ATTR_RW(max_luns
);
3060 static ssize_t
max_queue_show(struct device_driver
*ddp
, char *buf
)
3062 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_max_queue
);
3064 static ssize_t
max_queue_store(struct device_driver
*ddp
, const char *buf
,
3069 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
> 0) &&
3070 (n
<= SCSI_DEBUG_CANQUEUE
)) {
3071 scsi_debug_max_queue
= n
;
3076 static DRIVER_ATTR_RW(max_queue
);
3078 static ssize_t
no_uld_show(struct device_driver
*ddp
, char *buf
)
3080 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_no_uld
);
3082 static DRIVER_ATTR_RO(no_uld
);
3084 static ssize_t
scsi_level_show(struct device_driver
*ddp
, char *buf
)
3086 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_scsi_level
);
3088 static DRIVER_ATTR_RO(scsi_level
);
3090 static ssize_t
virtual_gb_show(struct device_driver
*ddp
, char *buf
)
3092 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_virtual_gb
);
3094 static ssize_t
virtual_gb_store(struct device_driver
*ddp
, const char *buf
,
3099 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
3100 scsi_debug_virtual_gb
= n
;
3102 sdebug_capacity
= get_sdebug_capacity();
3108 static DRIVER_ATTR_RW(virtual_gb
);
3110 static ssize_t
add_host_show(struct device_driver
*ddp
, char *buf
)
3112 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_add_host
);
3115 static ssize_t
add_host_store(struct device_driver
*ddp
, const char *buf
,
3120 if (sscanf(buf
, "%d", &delta_hosts
) != 1)
3122 if (delta_hosts
> 0) {
3124 sdebug_add_adapter();
3125 } while (--delta_hosts
);
3126 } else if (delta_hosts
< 0) {
3128 sdebug_remove_adapter();
3129 } while (++delta_hosts
);
3133 static DRIVER_ATTR_RW(add_host
);
3135 static ssize_t
vpd_use_hostno_show(struct device_driver
*ddp
, char *buf
)
3137 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_vpd_use_hostno
);
3139 static ssize_t
vpd_use_hostno_store(struct device_driver
*ddp
, const char *buf
,
3144 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
3145 scsi_debug_vpd_use_hostno
= n
;
3150 static DRIVER_ATTR_RW(vpd_use_hostno
);
3152 static ssize_t
sector_size_show(struct device_driver
*ddp
, char *buf
)
3154 return scnprintf(buf
, PAGE_SIZE
, "%u\n", scsi_debug_sector_size
);
3156 static DRIVER_ATTR_RO(sector_size
);
3158 static ssize_t
dix_show(struct device_driver
*ddp
, char *buf
)
3160 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_dix
);
3162 static DRIVER_ATTR_RO(dix
);
3164 static ssize_t
dif_show(struct device_driver
*ddp
, char *buf
)
3166 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_dif
);
3168 static DRIVER_ATTR_RO(dif
);
3170 static ssize_t
guard_show(struct device_driver
*ddp
, char *buf
)
3172 return scnprintf(buf
, PAGE_SIZE
, "%u\n", scsi_debug_guard
);
3174 static DRIVER_ATTR_RO(guard
);
3176 static ssize_t
ato_show(struct device_driver
*ddp
, char *buf
)
3178 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_ato
);
3180 static DRIVER_ATTR_RO(ato
);
3182 static ssize_t
map_show(struct device_driver
*ddp
, char *buf
)
3186 if (!scsi_debug_lbp())
3187 return scnprintf(buf
, PAGE_SIZE
, "0-%u\n",
3188 sdebug_store_sectors
);
3190 count
= bitmap_scnlistprintf(buf
, PAGE_SIZE
, map_storep
, map_size
);
3192 buf
[count
++] = '\n';
3197 static DRIVER_ATTR_RO(map
);
3199 static ssize_t
removable_show(struct device_driver
*ddp
, char *buf
)
3201 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_removable
? 1 : 0);
3203 static ssize_t
removable_store(struct device_driver
*ddp
, const char *buf
,
3208 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
3209 scsi_debug_removable
= (n
> 0);
3214 static DRIVER_ATTR_RW(removable
);
3216 /* Note: The following array creates attribute files in the
3217 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3218 files (over those found in the /sys/module/scsi_debug/parameters
3219 directory) is that auxiliary actions can be triggered when an attribute
3220 is changed. For example see: sdebug_add_host_store() above.
3223 static struct attribute
*sdebug_drv_attrs
[] = {
3224 &driver_attr_delay
.attr
,
3225 &driver_attr_opts
.attr
,
3226 &driver_attr_ptype
.attr
,
3227 &driver_attr_dsense
.attr
,
3228 &driver_attr_fake_rw
.attr
,
3229 &driver_attr_no_lun_0
.attr
,
3230 &driver_attr_num_tgts
.attr
,
3231 &driver_attr_dev_size_mb
.attr
,
3232 &driver_attr_num_parts
.attr
,
3233 &driver_attr_every_nth
.attr
,
3234 &driver_attr_max_luns
.attr
,
3235 &driver_attr_max_queue
.attr
,
3236 &driver_attr_no_uld
.attr
,
3237 &driver_attr_scsi_level
.attr
,
3238 &driver_attr_virtual_gb
.attr
,
3239 &driver_attr_add_host
.attr
,
3240 &driver_attr_vpd_use_hostno
.attr
,
3241 &driver_attr_sector_size
.attr
,
3242 &driver_attr_dix
.attr
,
3243 &driver_attr_dif
.attr
,
3244 &driver_attr_guard
.attr
,
3245 &driver_attr_ato
.attr
,
3246 &driver_attr_map
.attr
,
3247 &driver_attr_removable
.attr
,
3250 ATTRIBUTE_GROUPS(sdebug_drv
);
3252 static struct device
*pseudo_primary
;
3254 static int __init
scsi_debug_init(void)
3261 switch (scsi_debug_sector_size
) {
3268 printk(KERN_ERR
"scsi_debug_init: invalid sector_size %d\n",
3269 scsi_debug_sector_size
);
3273 switch (scsi_debug_dif
) {
3275 case SD_DIF_TYPE0_PROTECTION
:
3276 case SD_DIF_TYPE1_PROTECTION
:
3277 case SD_DIF_TYPE2_PROTECTION
:
3278 case SD_DIF_TYPE3_PROTECTION
:
3282 printk(KERN_ERR
"scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3286 if (scsi_debug_guard
> 1) {
3287 printk(KERN_ERR
"scsi_debug_init: guard must be 0 or 1\n");
3291 if (scsi_debug_ato
> 1) {
3292 printk(KERN_ERR
"scsi_debug_init: ato must be 0 or 1\n");
3296 if (scsi_debug_physblk_exp
> 15) {
3297 printk(KERN_ERR
"scsi_debug_init: invalid physblk_exp %u\n",
3298 scsi_debug_physblk_exp
);
3302 if (scsi_debug_lowest_aligned
> 0x3fff) {
3303 printk(KERN_ERR
"scsi_debug_init: lowest_aligned too big: %u\n",
3304 scsi_debug_lowest_aligned
);
3308 if (scsi_debug_dev_size_mb
< 1)
3309 scsi_debug_dev_size_mb
= 1; /* force minimum 1 MB ramdisk */
3310 sz
= (unsigned long)scsi_debug_dev_size_mb
* 1048576;
3311 sdebug_store_sectors
= sz
/ scsi_debug_sector_size
;
3312 sdebug_capacity
= get_sdebug_capacity();
3314 /* play around with geometry, don't waste too much on track 0 */
3316 sdebug_sectors_per
= 32;
3317 if (scsi_debug_dev_size_mb
>= 16)
3319 else if (scsi_debug_dev_size_mb
>= 256)
3321 sdebug_cylinders_per
= (unsigned long)sdebug_capacity
/
3322 (sdebug_sectors_per
* sdebug_heads
);
3323 if (sdebug_cylinders_per
>= 1024) {
3324 /* other LLDs do this; implies >= 1GB ram disk ... */
3326 sdebug_sectors_per
= 63;
3327 sdebug_cylinders_per
= (unsigned long)sdebug_capacity
/
3328 (sdebug_sectors_per
* sdebug_heads
);
3331 fake_storep
= vmalloc(sz
);
3332 if (NULL
== fake_storep
) {
3333 printk(KERN_ERR
"scsi_debug_init: out of memory, 1\n");
3336 memset(fake_storep
, 0, sz
);
3337 if (scsi_debug_num_parts
> 0)
3338 sdebug_build_parts(fake_storep
, sz
);
3340 if (scsi_debug_dix
) {
3343 dif_size
= sdebug_store_sectors
* sizeof(struct sd_dif_tuple
);
3344 dif_storep
= vmalloc(dif_size
);
3346 printk(KERN_ERR
"scsi_debug_init: dif_storep %u bytes @ %p\n",
3347 dif_size
, dif_storep
);
3349 if (dif_storep
== NULL
) {
3350 printk(KERN_ERR
"scsi_debug_init: out of mem. (DIX)\n");
3355 memset(dif_storep
, 0xff, dif_size
);
3358 /* Logical Block Provisioning */
3359 if (scsi_debug_lbp()) {
3360 scsi_debug_unmap_max_blocks
=
3361 clamp(scsi_debug_unmap_max_blocks
, 0U, 0xffffffffU
);
3363 scsi_debug_unmap_max_desc
=
3364 clamp(scsi_debug_unmap_max_desc
, 0U, 256U);
3366 scsi_debug_unmap_granularity
=
3367 clamp(scsi_debug_unmap_granularity
, 1U, 0xffffffffU
);
3369 if (scsi_debug_unmap_alignment
&&
3370 scsi_debug_unmap_granularity
<=
3371 scsi_debug_unmap_alignment
) {
3373 "%s: ERR: unmap_granularity <= unmap_alignment\n",
3378 map_size
= lba_to_map_index(sdebug_store_sectors
- 1) + 1;
3379 map_storep
= vmalloc(BITS_TO_LONGS(map_size
) * sizeof(long));
3381 printk(KERN_INFO
"scsi_debug_init: %lu provisioning blocks\n",
3384 if (map_storep
== NULL
) {
3385 printk(KERN_ERR
"scsi_debug_init: out of mem. (MAP)\n");
3390 bitmap_zero(map_storep
, map_size
);
3392 /* Map first 1KB for partition table */
3393 if (scsi_debug_num_parts
)
3397 pseudo_primary
= root_device_register("pseudo_0");
3398 if (IS_ERR(pseudo_primary
)) {
3399 printk(KERN_WARNING
"scsi_debug: root_device_register() error\n");
3400 ret
= PTR_ERR(pseudo_primary
);
3403 ret
= bus_register(&pseudo_lld_bus
);
3405 printk(KERN_WARNING
"scsi_debug: bus_register error: %d\n",
3409 ret
= driver_register(&sdebug_driverfs_driver
);
3411 printk(KERN_WARNING
"scsi_debug: driver_register error: %d\n",
3418 host_to_add
= scsi_debug_add_host
;
3419 scsi_debug_add_host
= 0;
3421 for (k
= 0; k
< host_to_add
; k
++) {
3422 if (sdebug_add_adapter()) {
3423 printk(KERN_ERR
"scsi_debug_init: "
3424 "sdebug_add_adapter failed k=%d\n", k
);
3429 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
) {
3430 printk(KERN_INFO
"scsi_debug_init: built %d host(s)\n",
3431 scsi_debug_add_host
);
3436 bus_unregister(&pseudo_lld_bus
);
3438 root_device_unregister(pseudo_primary
);
3449 static void __exit
scsi_debug_exit(void)
3451 int k
= scsi_debug_add_host
;
3455 sdebug_remove_adapter();
3456 driver_unregister(&sdebug_driverfs_driver
);
3457 bus_unregister(&pseudo_lld_bus
);
3458 root_device_unregister(pseudo_primary
);
/* Hook module entry/exit into the driver core. */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
/*
 * Device-model release callback for a simulated adapter: invoked by the
 * driver core once the embedded struct device's refcount drops to zero.
 * Frees the containing sdebug_host_info allocated in sdebug_add_adapter().
 */
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host = to_sdebug_host(dev);

	kfree(sdbg_host);
}
3477 static int sdebug_add_adapter(void)
3479 int k
, devs_per_host
;
3481 struct sdebug_host_info
*sdbg_host
;
3482 struct sdebug_dev_info
*sdbg_devinfo
, *tmp
;
3484 sdbg_host
= kzalloc(sizeof(*sdbg_host
),GFP_KERNEL
);
3485 if (NULL
== sdbg_host
) {
3486 printk(KERN_ERR
"%s: out of memory at line %d\n",
3487 __func__
, __LINE__
);
3491 INIT_LIST_HEAD(&sdbg_host
->dev_info_list
);
3493 devs_per_host
= scsi_debug_num_tgts
* scsi_debug_max_luns
;
3494 for (k
= 0; k
< devs_per_host
; k
++) {
3495 sdbg_devinfo
= sdebug_device_create(sdbg_host
, GFP_KERNEL
);
3496 if (!sdbg_devinfo
) {
3497 printk(KERN_ERR
"%s: out of memory at line %d\n",
3498 __func__
, __LINE__
);
3504 spin_lock(&sdebug_host_list_lock
);
3505 list_add_tail(&sdbg_host
->host_list
, &sdebug_host_list
);
3506 spin_unlock(&sdebug_host_list_lock
);
3508 sdbg_host
->dev
.bus
= &pseudo_lld_bus
;
3509 sdbg_host
->dev
.parent
= pseudo_primary
;
3510 sdbg_host
->dev
.release
= &sdebug_release_adapter
;
3511 dev_set_name(&sdbg_host
->dev
, "adapter%d", scsi_debug_add_host
);
3513 error
= device_register(&sdbg_host
->dev
);
3518 ++scsi_debug_add_host
;
3522 list_for_each_entry_safe(sdbg_devinfo
, tmp
, &sdbg_host
->dev_info_list
,
3524 list_del(&sdbg_devinfo
->dev_list
);
3525 kfree(sdbg_devinfo
);
3532 static void sdebug_remove_adapter(void)
3534 struct sdebug_host_info
* sdbg_host
= NULL
;
3536 spin_lock(&sdebug_host_list_lock
);
3537 if (!list_empty(&sdebug_host_list
)) {
3538 sdbg_host
= list_entry(sdebug_host_list
.prev
,
3539 struct sdebug_host_info
, host_list
);
3540 list_del(&sdbg_host
->host_list
);
3542 spin_unlock(&sdebug_host_list_lock
);
3547 device_unregister(&sdbg_host
->dev
);
3548 --scsi_debug_add_host
;
3552 int scsi_debug_queuecommand_lck(struct scsi_cmnd
*SCpnt
, done_funct_t done
)
3554 unsigned char *cmd
= (unsigned char *) SCpnt
->cmnd
;
3557 unsigned long long lba
;
3560 int target
= SCpnt
->device
->id
;
3561 struct sdebug_dev_info
*devip
= NULL
;
3562 int inj_recovered
= 0;
3563 int inj_transport
= 0;
3566 int delay_override
= 0;
3569 scsi_set_resid(SCpnt
, 0);
3570 if ((SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
) && cmd
) {
3571 printk(KERN_INFO
"scsi_debug: cmd ");
3572 for (k
= 0, len
= SCpnt
->cmd_len
; k
< len
; ++k
)
3573 printk("%02x ", (int)cmd
[k
]);
3577 if (target
== SCpnt
->device
->host
->hostt
->this_id
) {
3578 printk(KERN_INFO
"scsi_debug: initiator's id used as "
3580 return schedule_resp(SCpnt
, NULL
, done
,
3581 DID_NO_CONNECT
<< 16, 0);
3584 if ((SCpnt
->device
->lun
>= scsi_debug_max_luns
) &&
3585 (SCpnt
->device
->lun
!= SAM2_WLUN_REPORT_LUNS
))
3586 return schedule_resp(SCpnt
, NULL
, done
,
3587 DID_NO_CONNECT
<< 16, 0);
3588 devip
= devInfoReg(SCpnt
->device
);
3590 return schedule_resp(SCpnt
, NULL
, done
,
3591 DID_NO_CONNECT
<< 16, 0);
3593 if ((scsi_debug_every_nth
!= 0) &&
3594 (++scsi_debug_cmnd_count
>= abs(scsi_debug_every_nth
))) {
3595 scsi_debug_cmnd_count
= 0;
3596 if (scsi_debug_every_nth
< -1)
3597 scsi_debug_every_nth
= -1;
3598 if (SCSI_DEBUG_OPT_TIMEOUT
& scsi_debug_opts
)
3599 return 0; /* ignore command causing timeout */
3600 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT
& scsi_debug_opts
&&
3601 scsi_medium_access_command(SCpnt
))
3602 return 0; /* time out reads and writes */
3603 else if (SCSI_DEBUG_OPT_RECOVERED_ERR
& scsi_debug_opts
)
3604 inj_recovered
= 1; /* to reads and writes below */
3605 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR
& scsi_debug_opts
)
3606 inj_transport
= 1; /* to reads and writes below */
3607 else if (SCSI_DEBUG_OPT_DIF_ERR
& scsi_debug_opts
)
3608 inj_dif
= 1; /* to reads and writes below */
3609 else if (SCSI_DEBUG_OPT_DIX_ERR
& scsi_debug_opts
)
3610 inj_dix
= 1; /* to reads and writes below */
3617 case TEST_UNIT_READY
:
3619 break; /* only allowable wlun commands */
3621 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
3622 printk(KERN_INFO
"scsi_debug: Opcode: 0x%x "
3623 "not supported for wlun\n", *cmd
);
3624 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3626 errsts
= check_condition_result
;
3627 return schedule_resp(SCpnt
, devip
, done
, errsts
,
3633 case INQUIRY
: /* mandatory, ignore unit attention */
3635 errsts
= resp_inquiry(SCpnt
, target
, devip
);
3637 case REQUEST_SENSE
: /* mandatory, ignore unit attention */
3639 errsts
= resp_requests(SCpnt
, devip
);
3641 case REZERO_UNIT
: /* actually this is REWIND for SSC */
3643 errsts
= resp_start_stop(SCpnt
, devip
);
3645 case ALLOW_MEDIUM_REMOVAL
:
3646 errsts
= check_readiness(SCpnt
, 1, devip
);
3649 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
3650 printk(KERN_INFO
"scsi_debug: Medium removal %s\n",
3651 cmd
[4] ? "inhibited" : "enabled");
3653 case SEND_DIAGNOSTIC
: /* mandatory */
3654 errsts
= check_readiness(SCpnt
, 1, devip
);
3656 case TEST_UNIT_READY
: /* mandatory */
3658 errsts
= check_readiness(SCpnt
, 0, devip
);
3661 errsts
= check_readiness(SCpnt
, 1, devip
);
3664 errsts
= check_readiness(SCpnt
, 1, devip
);
3667 errsts
= check_readiness(SCpnt
, 1, devip
);
3670 errsts
= check_readiness(SCpnt
, 1, devip
);
3673 errsts
= resp_readcap(SCpnt
, devip
);
3675 case SERVICE_ACTION_IN
:
3676 if (cmd
[1] == SAI_READ_CAPACITY_16
)
3677 errsts
= resp_readcap16(SCpnt
, devip
);
3678 else if (cmd
[1] == SAI_GET_LBA_STATUS
) {
3680 if (scsi_debug_lbp() == 0) {
3681 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3682 INVALID_COMMAND_OPCODE
, 0);
3683 errsts
= check_condition_result
;
3685 errsts
= resp_get_lba_status(SCpnt
, devip
);
3687 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3689 errsts
= check_condition_result
;
3692 case MAINTENANCE_IN
:
3693 if (MI_REPORT_TARGET_PGS
!= cmd
[1]) {
3694 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3696 errsts
= check_condition_result
;
3699 errsts
= resp_report_tgtpgs(SCpnt
, devip
);
3704 /* READ{10,12,16} and DIF Type 2 are natural enemies */
3705 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
&&
3707 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3708 INVALID_COMMAND_OPCODE
, 0);
3709 errsts
= check_condition_result
;
3713 if ((scsi_debug_dif
== SD_DIF_TYPE1_PROTECTION
||
3714 scsi_debug_dif
== SD_DIF_TYPE3_PROTECTION
) &&
3715 (cmd
[1] & 0xe0) == 0)
3716 printk(KERN_ERR
"Unprotected RD/WR to DIF device\n");
3721 errsts
= check_readiness(SCpnt
, 0, devip
);
3724 if (scsi_debug_fake_rw
)
3726 get_data_transfer_info(cmd
, &lba
, &num
, &ei_lba
);
3727 errsts
= resp_read(SCpnt
, lba
, num
, devip
, ei_lba
);
3728 if (inj_recovered
&& (0 == errsts
)) {
3729 mk_sense_buffer(devip
, RECOVERED_ERROR
,
3730 THRESHOLD_EXCEEDED
, 0);
3731 errsts
= check_condition_result
;
3732 } else if (inj_transport
&& (0 == errsts
)) {
3733 mk_sense_buffer(devip
, ABORTED_COMMAND
,
3734 TRANSPORT_PROBLEM
, ACK_NAK_TO
);
3735 errsts
= check_condition_result
;
3736 } else if (inj_dif
&& (0 == errsts
)) {
3737 mk_sense_buffer(devip
, ABORTED_COMMAND
, 0x10, 1);
3738 errsts
= illegal_condition_result
;
3739 } else if (inj_dix
&& (0 == errsts
)) {
3740 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, 0x10, 1);
3741 errsts
= illegal_condition_result
;
3744 case REPORT_LUNS
: /* mandatory, ignore unit attention */
3746 errsts
= resp_report_luns(SCpnt
, devip
);
3748 case VERIFY
: /* 10 byte SBC-2 command */
3749 errsts
= check_readiness(SCpnt
, 0, devip
);
3754 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3755 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
&&
3757 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3758 INVALID_COMMAND_OPCODE
, 0);
3759 errsts
= check_condition_result
;
3763 if ((scsi_debug_dif
== SD_DIF_TYPE1_PROTECTION
||
3764 scsi_debug_dif
== SD_DIF_TYPE3_PROTECTION
) &&
3765 (cmd
[1] & 0xe0) == 0)
3766 printk(KERN_ERR
"Unprotected RD/WR to DIF device\n");
3771 errsts
= check_readiness(SCpnt
, 0, devip
);
3774 if (scsi_debug_fake_rw
)
3776 get_data_transfer_info(cmd
, &lba
, &num
, &ei_lba
);
3777 errsts
= resp_write(SCpnt
, lba
, num
, devip
, ei_lba
);
3778 if (inj_recovered
&& (0 == errsts
)) {
3779 mk_sense_buffer(devip
, RECOVERED_ERROR
,
3780 THRESHOLD_EXCEEDED
, 0);
3781 errsts
= check_condition_result
;
3782 } else if (inj_dif
&& (0 == errsts
)) {
3783 mk_sense_buffer(devip
, ABORTED_COMMAND
, 0x10, 1);
3784 errsts
= illegal_condition_result
;
3785 } else if (inj_dix
&& (0 == errsts
)) {
3786 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, 0x10, 1);
3787 errsts
= illegal_condition_result
;
3793 if ((*cmd
== WRITE_SAME_16
&& scsi_debug_lbpws
== 0) ||
3794 (*cmd
== WRITE_SAME
&& scsi_debug_lbpws10
== 0)) {
3795 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3796 INVALID_FIELD_IN_CDB
, 0);
3797 errsts
= check_condition_result
;
3803 errsts
= check_readiness(SCpnt
, 0, devip
);
3806 get_data_transfer_info(cmd
, &lba
, &num
, &ei_lba
);
3807 errsts
= resp_write_same(SCpnt
, lba
, num
, devip
, ei_lba
, unmap
);
3810 errsts
= check_readiness(SCpnt
, 0, devip
);
3814 if (scsi_debug_unmap_max_desc
== 0 || scsi_debug_lbpu
== 0) {
3815 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3816 INVALID_COMMAND_OPCODE
, 0);
3817 errsts
= check_condition_result
;
3819 errsts
= resp_unmap(SCpnt
, devip
);
3823 errsts
= resp_mode_sense(SCpnt
, target
, devip
);
3826 errsts
= resp_mode_select(SCpnt
, 1, devip
);
3828 case MODE_SELECT_10
:
3829 errsts
= resp_mode_select(SCpnt
, 0, devip
);
3832 errsts
= resp_log_sense(SCpnt
, devip
);
3834 case SYNCHRONIZE_CACHE
:
3836 errsts
= check_readiness(SCpnt
, 0, devip
);
3839 errsts
= check_readiness(SCpnt
, 1, devip
);
3841 case XDWRITEREAD_10
:
3842 if (!scsi_bidi_cmnd(SCpnt
)) {
3843 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3844 INVALID_FIELD_IN_CDB
, 0);
3845 errsts
= check_condition_result
;
3849 errsts
= check_readiness(SCpnt
, 0, devip
);
3852 if (scsi_debug_fake_rw
)
3854 get_data_transfer_info(cmd
, &lba
, &num
, &ei_lba
);
3855 errsts
= resp_read(SCpnt
, lba
, num
, devip
, ei_lba
);
3858 errsts
= resp_write(SCpnt
, lba
, num
, devip
, ei_lba
);
3861 errsts
= resp_xdwriteread(SCpnt
, lba
, num
, devip
);
3863 case VARIABLE_LENGTH_CMD
:
3864 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
) {
3866 if ((cmd
[10] & 0xe0) == 0)
3868 "Unprotected RD/WR to DIF device\n");
3870 if (cmd
[9] == READ_32
) {
3871 BUG_ON(SCpnt
->cmd_len
< 32);
3875 if (cmd
[9] == WRITE_32
) {
3876 BUG_ON(SCpnt
->cmd_len
< 32);
3881 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3882 INVALID_FIELD_IN_CDB
, 0);
3883 errsts
= check_condition_result
;
3887 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
3888 printk(KERN_INFO
"scsi_debug: Opcode: 0x%x not "
3889 "supported\n", *cmd
);
3890 errsts
= check_readiness(SCpnt
, 1, devip
);
3892 break; /* Unit attention takes precedence */
3893 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_OPCODE
, 0);
3894 errsts
= check_condition_result
;
3897 return schedule_resp(SCpnt
, devip
, done
, errsts
,
3898 (delay_override
? 0 : scsi_debug_delay
));
3901 static DEF_SCSI_QCMD(scsi_debug_queuecommand
)
3903 static struct scsi_host_template sdebug_driver_template
= {
3904 .show_info
= scsi_debug_show_info
,
3905 .write_info
= scsi_debug_write_info
,
3906 .proc_name
= sdebug_proc_name
,
3907 .name
= "SCSI DEBUG",
3908 .info
= scsi_debug_info
,
3909 .slave_alloc
= scsi_debug_slave_alloc
,
3910 .slave_configure
= scsi_debug_slave_configure
,
3911 .slave_destroy
= scsi_debug_slave_destroy
,
3912 .ioctl
= scsi_debug_ioctl
,
3913 .queuecommand
= scsi_debug_queuecommand
,
3914 .eh_abort_handler
= scsi_debug_abort
,
3915 .eh_bus_reset_handler
= scsi_debug_bus_reset
,
3916 .eh_device_reset_handler
= scsi_debug_device_reset
,
3917 .eh_host_reset_handler
= scsi_debug_host_reset
,
3918 .bios_param
= scsi_debug_biosparam
,
3919 .can_queue
= SCSI_DEBUG_CANQUEUE
,
3921 .sg_tablesize
= 256,
3923 .max_sectors
= 0xffff,
3924 .use_clustering
= DISABLE_CLUSTERING
,
3925 .module
= THIS_MODULE
,
3928 static int sdebug_driver_probe(struct device
* dev
)
3931 struct sdebug_host_info
*sdbg_host
;
3932 struct Scsi_Host
*hpnt
;
3935 sdbg_host
= to_sdebug_host(dev
);
3937 sdebug_driver_template
.can_queue
= scsi_debug_max_queue
;
3938 hpnt
= scsi_host_alloc(&sdebug_driver_template
, sizeof(sdbg_host
));
3940 printk(KERN_ERR
"%s: scsi_register failed\n", __func__
);
3945 sdbg_host
->shost
= hpnt
;
3946 *((struct sdebug_host_info
**)hpnt
->hostdata
) = sdbg_host
;
3947 if ((hpnt
->this_id
>= 0) && (scsi_debug_num_tgts
> hpnt
->this_id
))
3948 hpnt
->max_id
= scsi_debug_num_tgts
+ 1;
3950 hpnt
->max_id
= scsi_debug_num_tgts
;
3951 hpnt
->max_lun
= SAM2_WLUN_REPORT_LUNS
; /* = scsi_debug_max_luns; */
3955 switch (scsi_debug_dif
) {
3957 case SD_DIF_TYPE1_PROTECTION
:
3958 host_prot
= SHOST_DIF_TYPE1_PROTECTION
;
3960 host_prot
|= SHOST_DIX_TYPE1_PROTECTION
;
3963 case SD_DIF_TYPE2_PROTECTION
:
3964 host_prot
= SHOST_DIF_TYPE2_PROTECTION
;
3966 host_prot
|= SHOST_DIX_TYPE2_PROTECTION
;
3969 case SD_DIF_TYPE3_PROTECTION
:
3970 host_prot
= SHOST_DIF_TYPE3_PROTECTION
;
3972 host_prot
|= SHOST_DIX_TYPE3_PROTECTION
;
3977 host_prot
|= SHOST_DIX_TYPE0_PROTECTION
;
3981 scsi_host_set_prot(hpnt
, host_prot
);
3983 printk(KERN_INFO
"scsi_debug: host protection%s%s%s%s%s%s%s\n",
3984 (host_prot
& SHOST_DIF_TYPE1_PROTECTION
) ? " DIF1" : "",
3985 (host_prot
& SHOST_DIF_TYPE2_PROTECTION
) ? " DIF2" : "",
3986 (host_prot
& SHOST_DIF_TYPE3_PROTECTION
) ? " DIF3" : "",
3987 (host_prot
& SHOST_DIX_TYPE0_PROTECTION
) ? " DIX0" : "",
3988 (host_prot
& SHOST_DIX_TYPE1_PROTECTION
) ? " DIX1" : "",
3989 (host_prot
& SHOST_DIX_TYPE2_PROTECTION
) ? " DIX2" : "",
3990 (host_prot
& SHOST_DIX_TYPE3_PROTECTION
) ? " DIX3" : "");
3992 if (scsi_debug_guard
== 1)
3993 scsi_host_set_guard(hpnt
, SHOST_DIX_GUARD_IP
);
3995 scsi_host_set_guard(hpnt
, SHOST_DIX_GUARD_CRC
);
3997 error
= scsi_add_host(hpnt
, &sdbg_host
->dev
);
3999 printk(KERN_ERR
"%s: scsi_add_host failed\n", __func__
);
4001 scsi_host_put(hpnt
);
4003 scsi_scan_host(hpnt
);
4009 static int sdebug_driver_remove(struct device
* dev
)
4011 struct sdebug_host_info
*sdbg_host
;
4012 struct sdebug_dev_info
*sdbg_devinfo
, *tmp
;
4014 sdbg_host
= to_sdebug_host(dev
);
4017 printk(KERN_ERR
"%s: Unable to locate host info\n",
4022 scsi_remove_host(sdbg_host
->shost
);
4024 list_for_each_entry_safe(sdbg_devinfo
, tmp
, &sdbg_host
->dev_info_list
,
4026 list_del(&sdbg_devinfo
->dev_list
);
4027 kfree(sdbg_devinfo
);
4030 scsi_host_put(sdbg_host
->shost
);
/*
 * Bus match callback for the pseudo bus.
 * NOTE(review): the function body was dropped by the source extraction;
 * a bus private to this driver conventionally matches every device
 * (return 1) — confirm against the pristine file.
 */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
4040 static struct bus_type pseudo_lld_bus
= {
4042 .match
= pseudo_lld_bus_match
,
4043 .probe
= sdebug_driver_probe
,
4044 .remove
= sdebug_driver_remove
,
4045 .drv_groups
= sdebug_drv_groups
,