2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * Copyright (C) 2001 - 2016 Douglas Gilbert
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
16 * For documentation see http://sg.danny.cz/sg/sdebug26.html
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
23 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 #include <linux/uuid.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
60 #include "scsi_logging.h"
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "1.86"
64 static const char *sdebug_version_date
= "20160430";
66 #define MY_NAME "scsi_debug"
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
86 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
87 #define CAPACITY_CHANGED_ASCQ 0x9
88 #define SAVING_PARAMS_UNSUP 0x39
89 #define TRANSPORT_PROBLEM 0x4b
90 #define THRESHOLD_EXCEEDED 0x5d
91 #define LOW_POWER_COND_ON 0x5e
92 #define MISCOMPARE_VERIFY_ASC 0x1d
93 #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
94 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 /* Additional Sense Code Qualifier (ASCQ) */
97 #define ACK_NAK_TO 0x3
99 /* Default values for driver parameters */
100 #define DEF_NUM_HOST 1
101 #define DEF_NUM_TGTS 1
102 #define DEF_MAX_LUNS 1
103 /* With these defaults, this driver will make 1 host with 1 target
104 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
107 #define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
108 #define DEF_DEV_SIZE_MB 8
111 #define DEF_D_SENSE 0
112 #define DEF_EVERY_NTH 0
113 #define DEF_FAKE_RW 0
115 #define DEF_HOST_LOCK 0
118 #define DEF_LBPWS10 0
120 #define DEF_LOWEST_ALIGNED 0
121 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
122 #define DEF_NO_LUN_0 0
123 #define DEF_NUM_PARTS 0
125 #define DEF_OPT_BLKS 1024
126 #define DEF_PHYSBLK_EXP 0
127 #define DEF_PTYPE TYPE_DISK
128 #define DEF_REMOVABLE false
129 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
130 #define DEF_SECTOR_SIZE 512
131 #define DEF_UNMAP_ALIGNMENT 0
132 #define DEF_UNMAP_GRANULARITY 1
133 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
134 #define DEF_UNMAP_MAX_DESC 256
135 #define DEF_VIRTUAL_GB 0
136 #define DEF_VPD_USE_HOSTNO 1
137 #define DEF_WRITESAME_LENGTH 0xFFFF
139 #define DEF_STATISTICS false
140 #define DEF_SUBMIT_QUEUES 1
141 #define DEF_UUID_CTL 0
142 #define JDELAY_OVERRIDDEN -9999
144 #define SDEBUG_LUN_0_VAL 0
146 /* bit mask values for sdebug_opts */
147 #define SDEBUG_OPT_NOISE 1
148 #define SDEBUG_OPT_MEDIUM_ERR 2
149 #define SDEBUG_OPT_TIMEOUT 4
150 #define SDEBUG_OPT_RECOVERED_ERR 8
151 #define SDEBUG_OPT_TRANSPORT_ERR 16
152 #define SDEBUG_OPT_DIF_ERR 32
153 #define SDEBUG_OPT_DIX_ERR 64
154 #define SDEBUG_OPT_MAC_TIMEOUT 128
155 #define SDEBUG_OPT_SHORT_TRANSFER 0x100
156 #define SDEBUG_OPT_Q_NOISE 0x200
157 #define SDEBUG_OPT_ALL_TSF 0x400
158 #define SDEBUG_OPT_RARE_TSF 0x800
159 #define SDEBUG_OPT_N_WCE 0x1000
160 #define SDEBUG_OPT_RESET_NOISE 0x2000
161 #define SDEBUG_OPT_NO_CDB_NOISE 0x4000
162 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
163 SDEBUG_OPT_RESET_NOISE)
164 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
165 SDEBUG_OPT_TRANSPORT_ERR | \
166 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
167 SDEBUG_OPT_SHORT_TRANSFER)
168 /* When "every_nth" > 0 then modulo "every_nth" commands:
169 * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
170 * - a RECOVERED_ERROR is simulated on successful read and write
171 * commands if SDEBUG_OPT_RECOVERED_ERR is set.
172 * - a TRANSPORT_ERROR is simulated on successful read and write
173 * commands if SDEBUG_OPT_TRANSPORT_ERR is set.
175 * When "every_nth" < 0 then after "- every_nth" commands:
176 * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
177 * - a RECOVERED_ERROR is simulated on successful read and write
178 * commands if SDEBUG_OPT_RECOVERED_ERR is set.
179 * - a TRANSPORT_ERROR is simulated on successful read and write
180 * commands if SDEBUG_OPT_TRANSPORT_ERR is set.
181 * This will continue on every subsequent command until some other action
182 * occurs (e.g. the user * writing a new value (other than -1 or 1) to
183 * every_nth via sysfs).
186 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
187 * priority order. In the subset implemented here lower numbers have higher
188 * priority. The UA numbers should be a sequence starting from 0 with
189 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
190 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
191 #define SDEBUG_UA_BUS_RESET 1
192 #define SDEBUG_UA_MODE_CHANGED 2
193 #define SDEBUG_UA_CAPACITY_CHANGED 3
194 #define SDEBUG_UA_LUNS_CHANGED 4
195 #define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */
196 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
197 #define SDEBUG_NUM_UAS 7
199 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
200 * sector on read commands: */
201 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
202 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
204 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
205 * or "peripheral device" addressing (value 0) */
206 #define SAM2_LUN_ADDRESS_METHOD 0
208 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
209 * (for response) per submit queue at one time. Can be reduced by max_queue
210 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
211 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
212 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
213 * but cannot exceed SDEBUG_CANQUEUE .
215 #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
216 #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
217 #define DEF_CMD_PER_LUN 255
221 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
223 #define F_RL_WLUN_OK 0x10
224 #define F_SKIP_UA 0x20
225 #define F_DELAY_OVERR 0x40
226 #define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
227 #define F_SA_HIGH 0x100 /* as used by variable length cdbs */
228 #define F_INV_OP 0x200
229 #define F_FAKE_RW 0x400
230 #define F_M_ACCESS 0x800 /* media access */
232 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
233 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
234 #define FF_SA (F_SA_HIGH | F_SA_LOW)
236 #define SDEBUG_MAX_PARTS 4
238 #define SDEBUG_MAX_CMD_LEN 32
241 struct sdebug_dev_info
{
242 struct list_head dev_list
;
243 unsigned int channel
;
247 struct sdebug_host_info
*sdbg_host
;
248 unsigned long uas_bm
[1];
254 struct sdebug_host_info
{
255 struct list_head host_list
;
256 struct Scsi_Host
*shost
;
258 struct list_head dev_info_list
;
261 #define to_sdebug_host(d) \
262 container_of(d, struct sdebug_host_info, dev)
264 struct sdebug_defer
{
266 struct execute_work ew
;
267 int sqa_idx
; /* index of sdebug_queue array */
268 int qc_idx
; /* index of sdebug_queued_cmd array within sqa_idx */
272 struct sdebug_queued_cmd
{
273 /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
274 * instance indicates this slot is in use.
276 struct sdebug_defer
*sd_dp
;
277 struct scsi_cmnd
*a_cmnd
;
278 unsigned int inj_recovered
:1;
279 unsigned int inj_transport
:1;
280 unsigned int inj_dif
:1;
281 unsigned int inj_dix
:1;
282 unsigned int inj_short
:1;
285 struct sdebug_queue
{
286 struct sdebug_queued_cmd qc_arr
[SDEBUG_CANQUEUE
];
287 unsigned long in_use_bm
[SDEBUG_CANQUEUE_WORDS
];
289 atomic_t blocked
; /* to temporarily stop more being queued */
292 static atomic_t sdebug_cmnd_count
; /* number of incoming commands */
293 static atomic_t sdebug_completions
; /* count of deferred completions */
294 static atomic_t sdebug_miss_cpus
; /* submission + completion cpus differ */
295 static atomic_t sdebug_a_tsf
; /* 'almost task set full' counter */
297 struct opcode_info_t
{
298 u8 num_attached
; /* 0 if this is it (i.e. a leaf); use 0xff */
299 /* for terminating element */
300 u8 opcode
; /* if num_attached > 0, preferred */
301 u16 sa
; /* service action */
302 u32 flags
; /* OR-ed set of SDEB_F_* */
303 int (*pfp
)(struct scsi_cmnd
*, struct sdebug_dev_info
*);
304 const struct opcode_info_t
*arrp
; /* num_attached elements or NULL */
305 u8 len_mask
[16]; /* len=len_mask[0], then mask for cdb[1]... */
306 /* ignore cdb bytes after position 15 */
309 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
310 enum sdeb_opcode_index
{
311 SDEB_I_INVALID_OPCODE
= 0,
313 SDEB_I_REPORT_LUNS
= 2,
314 SDEB_I_REQUEST_SENSE
= 3,
315 SDEB_I_TEST_UNIT_READY
= 4,
316 SDEB_I_MODE_SENSE
= 5, /* 6, 10 */
317 SDEB_I_MODE_SELECT
= 6, /* 6, 10 */
318 SDEB_I_LOG_SENSE
= 7,
319 SDEB_I_READ_CAPACITY
= 8, /* 10; 16 is in SA_IN(16) */
320 SDEB_I_READ
= 9, /* 6, 10, 12, 16 */
321 SDEB_I_WRITE
= 10, /* 6, 10, 12, 16 */
322 SDEB_I_START_STOP
= 11,
323 SDEB_I_SERV_ACT_IN
= 12, /* 12, 16 */
324 SDEB_I_SERV_ACT_OUT
= 13, /* 12, 16 */
325 SDEB_I_MAINT_IN
= 14,
326 SDEB_I_MAINT_OUT
= 15,
327 SDEB_I_VERIFY
= 16, /* 10 only */
328 SDEB_I_VARIABLE_LEN
= 17,
329 SDEB_I_RESERVE
= 18, /* 6, 10 */
330 SDEB_I_RELEASE
= 19, /* 6, 10 */
331 SDEB_I_ALLOW_REMOVAL
= 20, /* PREVENT ALLOW MEDIUM REMOVAL */
332 SDEB_I_REZERO_UNIT
= 21, /* REWIND in SSC */
333 SDEB_I_ATA_PT
= 22, /* 12, 16 */
334 SDEB_I_SEND_DIAG
= 23,
336 SDEB_I_XDWRITEREAD
= 25, /* 10 only */
337 SDEB_I_WRITE_BUFFER
= 26,
338 SDEB_I_WRITE_SAME
= 27, /* 10, 16 */
339 SDEB_I_SYNC_CACHE
= 28, /* 10 only */
340 SDEB_I_COMP_WRITE
= 29,
341 SDEB_I_LAST_ELEMENT
= 30, /* keep this last */
345 static const unsigned char opcode_ind_arr
[256] = {
346 /* 0x0; 0x0->0x1f: 6 byte cdbs */
347 SDEB_I_TEST_UNIT_READY
, SDEB_I_REZERO_UNIT
, 0, SDEB_I_REQUEST_SENSE
,
349 SDEB_I_READ
, 0, SDEB_I_WRITE
, 0, 0, 0, 0, 0,
350 0, 0, SDEB_I_INQUIRY
, 0, 0, SDEB_I_MODE_SELECT
, SDEB_I_RESERVE
,
352 0, 0, SDEB_I_MODE_SENSE
, SDEB_I_START_STOP
, 0, SDEB_I_SEND_DIAG
,
353 SDEB_I_ALLOW_REMOVAL
, 0,
354 /* 0x20; 0x20->0x3f: 10 byte cdbs */
355 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY
, 0, 0,
356 SDEB_I_READ
, 0, SDEB_I_WRITE
, 0, 0, 0, 0, SDEB_I_VERIFY
,
357 0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE
, 0, 0,
358 0, 0, 0, SDEB_I_WRITE_BUFFER
, 0, 0, 0, 0,
359 /* 0x40; 0x40->0x5f: 10 byte cdbs */
360 0, SDEB_I_WRITE_SAME
, SDEB_I_UNMAP
, 0, 0, 0, 0, 0,
361 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE
, 0, 0,
362 0, 0, 0, SDEB_I_XDWRITEREAD
, 0, SDEB_I_MODE_SELECT
, SDEB_I_RESERVE
,
364 0, 0, SDEB_I_MODE_SENSE
, 0, 0, 0, 0, 0,
365 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
366 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
367 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
368 0, SDEB_I_VARIABLE_LEN
,
369 /* 0x80; 0x80->0x9f: 16 byte cdbs */
370 0, 0, 0, 0, 0, SDEB_I_ATA_PT
, 0, 0,
371 SDEB_I_READ
, SDEB_I_COMP_WRITE
, SDEB_I_WRITE
, 0, 0, 0, 0, 0,
372 0, 0, 0, SDEB_I_WRITE_SAME
, 0, 0, 0, 0,
373 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN
, SDEB_I_SERV_ACT_OUT
,
374 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
375 SDEB_I_REPORT_LUNS
, SDEB_I_ATA_PT
, 0, SDEB_I_MAINT_IN
,
376 SDEB_I_MAINT_OUT
, 0, 0, 0,
377 SDEB_I_READ
, SDEB_I_SERV_ACT_OUT
, SDEB_I_WRITE
, SDEB_I_SERV_ACT_IN
,
379 0, 0, 0, 0, 0, 0, 0, 0,
380 0, 0, 0, 0, 0, 0, 0, 0,
381 /* 0xc0; 0xc0->0xff: vendor specific */
382 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
383 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
384 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
385 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
388 static int resp_inquiry(struct scsi_cmnd
*, struct sdebug_dev_info
*);
389 static int resp_report_luns(struct scsi_cmnd
*, struct sdebug_dev_info
*);
390 static int resp_requests(struct scsi_cmnd
*, struct sdebug_dev_info
*);
391 static int resp_mode_sense(struct scsi_cmnd
*, struct sdebug_dev_info
*);
392 static int resp_mode_select(struct scsi_cmnd
*, struct sdebug_dev_info
*);
393 static int resp_log_sense(struct scsi_cmnd
*, struct sdebug_dev_info
*);
394 static int resp_readcap(struct scsi_cmnd
*, struct sdebug_dev_info
*);
395 static int resp_read_dt0(struct scsi_cmnd
*, struct sdebug_dev_info
*);
396 static int resp_write_dt0(struct scsi_cmnd
*, struct sdebug_dev_info
*);
397 static int resp_start_stop(struct scsi_cmnd
*, struct sdebug_dev_info
*);
398 static int resp_readcap16(struct scsi_cmnd
*, struct sdebug_dev_info
*);
399 static int resp_get_lba_status(struct scsi_cmnd
*, struct sdebug_dev_info
*);
400 static int resp_report_tgtpgs(struct scsi_cmnd
*, struct sdebug_dev_info
*);
401 static int resp_unmap(struct scsi_cmnd
*, struct sdebug_dev_info
*);
402 static int resp_rsup_opcodes(struct scsi_cmnd
*, struct sdebug_dev_info
*);
403 static int resp_rsup_tmfs(struct scsi_cmnd
*, struct sdebug_dev_info
*);
404 static int resp_write_same_10(struct scsi_cmnd
*, struct sdebug_dev_info
*);
405 static int resp_write_same_16(struct scsi_cmnd
*, struct sdebug_dev_info
*);
406 static int resp_xdwriteread_10(struct scsi_cmnd
*, struct sdebug_dev_info
*);
407 static int resp_comp_write(struct scsi_cmnd
*, struct sdebug_dev_info
*);
408 static int resp_write_buffer(struct scsi_cmnd
*, struct sdebug_dev_info
*);
410 static const struct opcode_info_t msense_iarr
[1] = {
411 {0, 0x1a, 0, F_D_IN
, NULL
, NULL
,
412 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
415 static const struct opcode_info_t mselect_iarr
[1] = {
416 {0, 0x15, 0, F_D_OUT
, NULL
, NULL
,
417 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
420 static const struct opcode_info_t read_iarr
[3] = {
421 {0, 0x28, 0, F_D_IN
| FF_DIRECT_IO
, resp_read_dt0
, NULL
,/* READ(10) */
422 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
424 {0, 0x8, 0, F_D_IN
| FF_DIRECT_IO
, resp_read_dt0
, NULL
, /* READ(6) */
425 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
426 {0, 0xa8, 0, F_D_IN
| FF_DIRECT_IO
, resp_read_dt0
, NULL
,/* READ(12) */
427 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
431 static const struct opcode_info_t write_iarr
[3] = {
432 {0, 0x2a, 0, F_D_OUT
| FF_DIRECT_IO
, resp_write_dt0
, NULL
, /* 10 */
433 {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
435 {0, 0xa, 0, F_D_OUT
| FF_DIRECT_IO
, resp_write_dt0
, NULL
, /* 6 */
436 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
437 {0, 0xaa, 0, F_D_OUT
| FF_DIRECT_IO
, resp_write_dt0
, NULL
, /* 12 */
438 {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
442 static const struct opcode_info_t sa_in_iarr
[1] = {
443 {0, 0x9e, 0x12, F_SA_LOW
| F_D_IN
, resp_get_lba_status
, NULL
,
444 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
445 0xff, 0xff, 0xff, 0, 0xc7} },
448 static const struct opcode_info_t vl_iarr
[1] = { /* VARIABLE LENGTH */
449 {0, 0x7f, 0xb, F_SA_HIGH
| F_D_OUT
| FF_DIRECT_IO
, resp_write_dt0
,
450 NULL
, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
451 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
454 static const struct opcode_info_t maint_in_iarr
[2] = {
455 {0, 0xa3, 0xc, F_SA_LOW
| F_D_IN
, resp_rsup_opcodes
, NULL
,
456 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
458 {0, 0xa3, 0xd, F_SA_LOW
| F_D_IN
, resp_rsup_tmfs
, NULL
,
459 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
463 static const struct opcode_info_t write_same_iarr
[1] = {
464 {0, 0x93, 0, F_D_OUT_MAYBE
| FF_DIRECT_IO
, resp_write_same_16
, NULL
,
465 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
466 0xff, 0xff, 0xff, 0x1f, 0xc7} },
469 static const struct opcode_info_t reserve_iarr
[1] = {
470 {0, 0x16, 0, F_D_OUT
, NULL
, NULL
, /* RESERVE(6) */
471 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
474 static const struct opcode_info_t release_iarr
[1] = {
475 {0, 0x17, 0, F_D_OUT
, NULL
, NULL
, /* RELEASE(6) */
476 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
480 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
481 * plus the terminating elements for logic that scans this table such as
482 * REPORT SUPPORTED OPERATION CODES. */
483 static const struct opcode_info_t opcode_info_arr
[SDEB_I_LAST_ELEMENT
+ 1] = {
485 {0, 0, 0, F_INV_OP
| FF_RESPOND
, NULL
, NULL
,
486 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
487 {0, 0x12, 0, FF_RESPOND
| F_D_IN
, resp_inquiry
, NULL
,
488 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
489 {0, 0xa0, 0, FF_RESPOND
| F_D_IN
, resp_report_luns
, NULL
,
490 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
492 {0, 0x3, 0, FF_RESPOND
| F_D_IN
, resp_requests
, NULL
,
493 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
494 {0, 0x0, 0, F_M_ACCESS
| F_RL_WLUN_OK
, NULL
, NULL
,/* TEST UNIT READY */
495 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
496 {1, 0x5a, 0, F_D_IN
, resp_mode_sense
, msense_iarr
,
497 {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
499 {1, 0x55, 0, F_D_OUT
, resp_mode_select
, mselect_iarr
,
500 {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
501 {0, 0x4d, 0, F_D_IN
, resp_log_sense
, NULL
,
502 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
504 {0, 0x25, 0, F_D_IN
, resp_readcap
, NULL
,
505 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
507 {3, 0x88, 0, F_D_IN
| FF_DIRECT_IO
, resp_read_dt0
, read_iarr
,
508 {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
509 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */
511 {3, 0x8a, 0, F_D_OUT
| FF_DIRECT_IO
, resp_write_dt0
, write_iarr
,
512 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
513 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */
514 {0, 0x1b, 0, 0, resp_start_stop
, NULL
, /* START STOP UNIT */
515 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
516 {1, 0x9e, 0x10, F_SA_LOW
| F_D_IN
, resp_readcap16
, sa_in_iarr
,
517 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
518 0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */
519 {0, 0, 0, F_INV_OP
| FF_RESPOND
, NULL
, NULL
, /* SA OUT */
520 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
521 {2, 0xa3, 0xa, F_SA_LOW
| F_D_IN
, resp_report_tgtpgs
, maint_in_iarr
,
522 {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
524 {0, 0, 0, F_INV_OP
| FF_RESPOND
, NULL
, NULL
, /* MAINT OUT */
525 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
526 {0, 0x2f, 0, F_D_OUT_MAYBE
| FF_DIRECT_IO
, NULL
, NULL
, /* VERIFY(10) */
527 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
529 {1, 0x7f, 0x9, F_SA_HIGH
| F_D_IN
| FF_DIRECT_IO
, resp_read_dt0
,
530 vl_iarr
, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
531 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
532 {1, 0x56, 0, F_D_OUT
, NULL
, reserve_iarr
, /* RESERVE(10) */
533 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
535 {1, 0x57, 0, F_D_OUT
, NULL
, release_iarr
, /* RELEASE(10) */
536 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
539 {0, 0x1e, 0, 0, NULL
, NULL
, /* ALLOW REMOVAL */
540 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
541 {0, 0x1, 0, 0, resp_start_stop
, NULL
, /* REWIND ?? */
542 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
543 {0, 0, 0, F_INV_OP
| FF_RESPOND
, NULL
, NULL
, /* ATA_PT */
544 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
545 {0, 0x1d, F_D_OUT
, 0, NULL
, NULL
, /* SEND DIAGNOSTIC */
546 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
547 {0, 0x42, 0, F_D_OUT
| FF_DIRECT_IO
, resp_unmap
, NULL
, /* UNMAP */
548 {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
549 {0, 0x53, 0, F_D_IN
| F_D_OUT
| FF_DIRECT_IO
, resp_xdwriteread_10
,
550 NULL
, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
552 {0, 0x3b, 0, F_D_OUT_MAYBE
, resp_write_buffer
, NULL
,
553 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
554 0, 0, 0, 0} }, /* WRITE_BUFFER */
555 {1, 0x41, 0, F_D_OUT_MAYBE
| FF_DIRECT_IO
, resp_write_same_10
,
556 write_same_iarr
, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
557 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
558 {0, 0x35, 0, F_DELAY_OVERR
| FF_DIRECT_IO
, NULL
, NULL
, /* SYNC_CACHE */
559 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
561 {0, 0x89, 0, F_D_OUT
| FF_DIRECT_IO
, resp_comp_write
, NULL
,
562 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
563 0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */
566 {0xff, 0, 0, 0, NULL
, NULL
, /* terminating element */
567 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
570 static int sdebug_add_host
= DEF_NUM_HOST
;
571 static int sdebug_ato
= DEF_ATO
;
572 static int sdebug_jdelay
= DEF_JDELAY
; /* if > 0 then unit is jiffies */
573 static int sdebug_dev_size_mb
= DEF_DEV_SIZE_MB
;
574 static int sdebug_dif
= DEF_DIF
;
575 static int sdebug_dix
= DEF_DIX
;
576 static int sdebug_dsense
= DEF_D_SENSE
;
577 static int sdebug_every_nth
= DEF_EVERY_NTH
;
578 static int sdebug_fake_rw
= DEF_FAKE_RW
;
579 static unsigned int sdebug_guard
= DEF_GUARD
;
580 static int sdebug_lowest_aligned
= DEF_LOWEST_ALIGNED
;
581 static int sdebug_max_luns
= DEF_MAX_LUNS
;
582 static int sdebug_max_queue
= SDEBUG_CANQUEUE
; /* per submit queue */
583 static atomic_t retired_max_queue
; /* if > 0 then was prior max_queue */
584 static int sdebug_ndelay
= DEF_NDELAY
; /* if > 0 then unit is nanoseconds */
585 static int sdebug_no_lun_0
= DEF_NO_LUN_0
;
586 static int sdebug_no_uld
;
587 static int sdebug_num_parts
= DEF_NUM_PARTS
;
588 static int sdebug_num_tgts
= DEF_NUM_TGTS
; /* targets per host */
589 static int sdebug_opt_blks
= DEF_OPT_BLKS
;
590 static int sdebug_opts
= DEF_OPTS
;
591 static int sdebug_physblk_exp
= DEF_PHYSBLK_EXP
;
592 static int sdebug_ptype
= DEF_PTYPE
; /* SCSI peripheral device type */
593 static int sdebug_scsi_level
= DEF_SCSI_LEVEL
;
594 static int sdebug_sector_size
= DEF_SECTOR_SIZE
;
595 static int sdebug_virtual_gb
= DEF_VIRTUAL_GB
;
596 static int sdebug_vpd_use_hostno
= DEF_VPD_USE_HOSTNO
;
597 static unsigned int sdebug_lbpu
= DEF_LBPU
;
598 static unsigned int sdebug_lbpws
= DEF_LBPWS
;
599 static unsigned int sdebug_lbpws10
= DEF_LBPWS10
;
600 static unsigned int sdebug_lbprz
= DEF_LBPRZ
;
601 static unsigned int sdebug_unmap_alignment
= DEF_UNMAP_ALIGNMENT
;
602 static unsigned int sdebug_unmap_granularity
= DEF_UNMAP_GRANULARITY
;
603 static unsigned int sdebug_unmap_max_blocks
= DEF_UNMAP_MAX_BLOCKS
;
604 static unsigned int sdebug_unmap_max_desc
= DEF_UNMAP_MAX_DESC
;
605 static unsigned int sdebug_write_same_length
= DEF_WRITESAME_LENGTH
;
606 static int sdebug_uuid_ctl
= DEF_UUID_CTL
;
607 static bool sdebug_removable
= DEF_REMOVABLE
;
608 static bool sdebug_clustering
;
609 static bool sdebug_host_lock
= DEF_HOST_LOCK
;
610 static bool sdebug_strict
= DEF_STRICT
;
611 static bool sdebug_any_injecting_opt
;
612 static bool sdebug_verbose
;
613 static bool have_dif_prot
;
614 static bool sdebug_statistics
= DEF_STATISTICS
;
615 static bool sdebug_mq_active
;
617 static unsigned int sdebug_store_sectors
;
618 static sector_t sdebug_capacity
; /* in sectors */
620 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
621 may still need them */
622 static int sdebug_heads
; /* heads per disk */
623 static int sdebug_cylinders_per
; /* cylinders per surface */
624 static int sdebug_sectors_per
; /* sectors per cylinder */
626 static LIST_HEAD(sdebug_host_list
);
627 static DEFINE_SPINLOCK(sdebug_host_list_lock
);
629 static unsigned char *fake_storep
; /* ramdisk storage */
630 static struct sd_dif_tuple
*dif_storep
; /* protection info */
631 static void *map_storep
; /* provisioning map */
633 static unsigned long map_size
;
634 static int num_aborts
;
635 static int num_dev_resets
;
636 static int num_target_resets
;
637 static int num_bus_resets
;
638 static int num_host_resets
;
639 static int dix_writes
;
640 static int dix_reads
;
641 static int dif_errors
;
643 static int submit_queues
= DEF_SUBMIT_QUEUES
; /* > 1 for multi-queue (mq) */
644 static struct sdebug_queue
*sdebug_q_arr
; /* ptr to array of submit queues */
646 static DEFINE_RWLOCK(atomic_rw
);
648 static char sdebug_proc_name
[] = MY_NAME
;
649 static const char *my_name
= MY_NAME
;
651 static struct bus_type pseudo_lld_bus
;
653 static struct device_driver sdebug_driverfs_driver
= {
654 .name
= sdebug_proc_name
,
655 .bus
= &pseudo_lld_bus
,
658 static const int check_condition_result
=
659 (DRIVER_SENSE
<< 24) | SAM_STAT_CHECK_CONDITION
;
661 static const int illegal_condition_result
=
662 (DRIVER_SENSE
<< 24) | (DID_ABORT
<< 16) | SAM_STAT_CHECK_CONDITION
;
664 static const int device_qfull_result
=
665 (DID_OK
<< 16) | (COMMAND_COMPLETE
<< 8) | SAM_STAT_TASK_SET_FULL
;
668 /* Only do the extra work involved in logical block provisioning if one or
669 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
670 * real reads and writes (i.e. not skipping them for speed).
672 static inline bool scsi_debug_lbp(void)
674 return 0 == sdebug_fake_rw
&&
675 (sdebug_lbpu
|| sdebug_lbpws
|| sdebug_lbpws10
);
678 static void *fake_store(unsigned long long lba
)
680 lba
= do_div(lba
, sdebug_store_sectors
);
682 return fake_storep
+ lba
* sdebug_sector_size
;
685 static struct sd_dif_tuple
*dif_store(sector_t sector
)
687 sector
= sector_div(sector
, sdebug_store_sectors
);
689 return dif_storep
+ sector
;
692 static void sdebug_max_tgts_luns(void)
694 struct sdebug_host_info
*sdbg_host
;
695 struct Scsi_Host
*hpnt
;
697 spin_lock(&sdebug_host_list_lock
);
698 list_for_each_entry(sdbg_host
, &sdebug_host_list
, host_list
) {
699 hpnt
= sdbg_host
->shost
;
700 if ((hpnt
->this_id
>= 0) &&
701 (sdebug_num_tgts
> hpnt
->this_id
))
702 hpnt
->max_id
= sdebug_num_tgts
+ 1;
704 hpnt
->max_id
= sdebug_num_tgts
;
705 /* sdebug_max_luns; */
706 hpnt
->max_lun
= SCSI_W_LUN_REPORT_LUNS
+ 1;
708 spin_unlock(&sdebug_host_list_lock
);
711 enum sdeb_cmd_data
{SDEB_IN_DATA
= 0, SDEB_IN_CDB
= 1};
713 /* Set in_bit to -1 to indicate no bit position of invalid field */
714 static void mk_sense_invalid_fld(struct scsi_cmnd
*scp
,
715 enum sdeb_cmd_data c_d
,
716 int in_byte
, int in_bit
)
718 unsigned char *sbuff
;
722 sbuff
= scp
->sense_buffer
;
724 sdev_printk(KERN_ERR
, scp
->device
,
725 "%s: sense_buffer is NULL\n", __func__
);
728 asc
= c_d
? INVALID_FIELD_IN_CDB
: INVALID_FIELD_IN_PARAM_LIST
;
729 memset(sbuff
, 0, SCSI_SENSE_BUFFERSIZE
);
730 scsi_build_sense_buffer(sdebug_dsense
, sbuff
, ILLEGAL_REQUEST
, asc
, 0);
731 memset(sks
, 0, sizeof(sks
));
737 sks
[0] |= 0x7 & in_bit
;
739 put_unaligned_be16(in_byte
, sks
+ 1);
745 memcpy(sbuff
+ sl
+ 4, sks
, 3);
747 memcpy(sbuff
+ 15, sks
, 3);
749 sdev_printk(KERN_INFO
, scp
->device
, "%s: [sense_key,asc,ascq"
750 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
751 my_name
, asc
, c_d
? 'C' : 'D', in_byte
, in_bit
);
754 static void mk_sense_buffer(struct scsi_cmnd
*scp
, int key
, int asc
, int asq
)
756 unsigned char *sbuff
;
758 sbuff
= scp
->sense_buffer
;
760 sdev_printk(KERN_ERR
, scp
->device
,
761 "%s: sense_buffer is NULL\n", __func__
);
764 memset(sbuff
, 0, SCSI_SENSE_BUFFERSIZE
);
766 scsi_build_sense_buffer(sdebug_dsense
, sbuff
, key
, asc
, asq
);
769 sdev_printk(KERN_INFO
, scp
->device
,
770 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
771 my_name
, key
, asc
, asq
);
774 static void mk_sense_invalid_opcode(struct scsi_cmnd
*scp
)
776 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_OPCODE
, 0);
779 static int scsi_debug_ioctl(struct scsi_device
*dev
, int cmd
, void __user
*arg
)
781 if (sdebug_verbose
) {
783 sdev_printk(KERN_INFO
, dev
,
784 "%s: BLKFLSBUF [0x1261]\n", __func__
);
785 else if (0x5331 == cmd
)
786 sdev_printk(KERN_INFO
, dev
,
787 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
790 sdev_printk(KERN_INFO
, dev
, "%s: cmd=0x%x\n",
794 /* return -ENOTTY; // correct return but upsets fdisk */
797 static void clear_luns_changed_on_target(struct sdebug_dev_info
*devip
)
799 struct sdebug_host_info
*sdhp
;
800 struct sdebug_dev_info
*dp
;
802 spin_lock(&sdebug_host_list_lock
);
803 list_for_each_entry(sdhp
, &sdebug_host_list
, host_list
) {
804 list_for_each_entry(dp
, &sdhp
->dev_info_list
, dev_list
) {
805 if ((devip
->sdbg_host
== dp
->sdbg_host
) &&
806 (devip
->target
== dp
->target
))
807 clear_bit(SDEBUG_UA_LUNS_CHANGED
, dp
->uas_bm
);
810 spin_unlock(&sdebug_host_list_lock
);
813 static int make_ua(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
817 k
= find_first_bit(devip
->uas_bm
, SDEBUG_NUM_UAS
);
818 if (k
!= SDEBUG_NUM_UAS
) {
819 const char *cp
= NULL
;
823 mk_sense_buffer(scp
, UNIT_ATTENTION
, UA_RESET_ASC
,
824 POWER_ON_RESET_ASCQ
);
826 cp
= "power on reset";
828 case SDEBUG_UA_BUS_RESET
:
829 mk_sense_buffer(scp
, UNIT_ATTENTION
, UA_RESET_ASC
,
834 case SDEBUG_UA_MODE_CHANGED
:
835 mk_sense_buffer(scp
, UNIT_ATTENTION
, UA_CHANGED_ASC
,
838 cp
= "mode parameters changed";
840 case SDEBUG_UA_CAPACITY_CHANGED
:
841 mk_sense_buffer(scp
, UNIT_ATTENTION
, UA_CHANGED_ASC
,
842 CAPACITY_CHANGED_ASCQ
);
844 cp
= "capacity data changed";
846 case SDEBUG_UA_MICROCODE_CHANGED
:
847 mk_sense_buffer(scp
, UNIT_ATTENTION
,
849 MICROCODE_CHANGED_ASCQ
);
851 cp
= "microcode has been changed";
853 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET
:
854 mk_sense_buffer(scp
, UNIT_ATTENTION
,
856 MICROCODE_CHANGED_WO_RESET_ASCQ
);
858 cp
= "microcode has been changed without reset";
860 case SDEBUG_UA_LUNS_CHANGED
:
862 * SPC-3 behavior is to report a UNIT ATTENTION with
863 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
864 * on the target, until a REPORT LUNS command is
865 * received. SPC-4 behavior is to report it only once.
866 * NOTE: sdebug_scsi_level does not use the same
867 * values as struct scsi_device->scsi_level.
869 if (sdebug_scsi_level
>= 6) /* SPC-4 and above */
870 clear_luns_changed_on_target(devip
);
871 mk_sense_buffer(scp
, UNIT_ATTENTION
,
875 cp
= "reported luns data has changed";
878 pr_warn("unexpected unit attention code=%d\n", k
);
883 clear_bit(k
, devip
->uas_bm
);
885 sdev_printk(KERN_INFO
, scp
->device
,
886 "%s reports: Unit attention: %s\n",
888 return check_condition_result
;
893 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
894 static int fill_from_dev_buffer(struct scsi_cmnd
*scp
, unsigned char *arr
,
898 struct scsi_data_buffer
*sdb
= scsi_in(scp
);
902 if (!(scsi_bidi_cmnd(scp
) || scp
->sc_data_direction
== DMA_FROM_DEVICE
))
903 return DID_ERROR
<< 16;
905 act_len
= sg_copy_from_buffer(sdb
->table
.sgl
, sdb
->table
.nents
,
907 sdb
->resid
= scsi_bufflen(scp
) - act_len
;
912 /* Returns number of bytes fetched into 'arr' or -1 if error. */
913 static int fetch_to_dev_buffer(struct scsi_cmnd
*scp
, unsigned char *arr
,
916 if (!scsi_bufflen(scp
))
918 if (!(scsi_bidi_cmnd(scp
) || scp
->sc_data_direction
== DMA_TO_DEVICE
))
921 return scsi_sg_copy_to_buffer(scp
, arr
, arr_len
);
925 static const char * inq_vendor_id
= "Linux ";
926 static const char * inq_product_id
= "scsi_debug ";
927 static const char *inq_product_rev
= "0186"; /* version less '.' */
928 static const u64 naa5_comp_a
= 0x5222222000000000ULL
;
929 static const u64 naa5_comp_b
= 0x5333333000000000ULL
;
930 static const u64 naa5_comp_c
= 0x5111111000000000ULL
;
932 /* Device identification VPD page. Returns number of bytes placed in arr */
933 static int inquiry_vpd_83(unsigned char *arr
, int port_group_id
,
934 int target_dev_id
, int dev_id_num
,
935 const char *dev_id_str
, int dev_id_str_len
,
936 const uuid_be
*lu_name
)
941 port_a
= target_dev_id
+ 1;
942 /* T10 vendor identifier field format (faked) */
943 arr
[0] = 0x2; /* ASCII */
946 memcpy(&arr
[4], inq_vendor_id
, 8);
947 memcpy(&arr
[12], inq_product_id
, 16);
948 memcpy(&arr
[28], dev_id_str
, dev_id_str_len
);
949 num
= 8 + 16 + dev_id_str_len
;
952 if (dev_id_num
>= 0) {
953 if (sdebug_uuid_ctl
) {
954 /* Locally assigned UUID */
955 arr
[num
++] = 0x1; /* binary (not necessarily sas) */
956 arr
[num
++] = 0xa; /* PIV=0, lu, naa */
959 arr
[num
++] = 0x10; /* uuid type=1, locally assigned */
961 memcpy(arr
+ num
, lu_name
, 16);
964 /* NAA-5, Logical unit identifier (binary) */
965 arr
[num
++] = 0x1; /* binary (not necessarily sas) */
966 arr
[num
++] = 0x3; /* PIV=0, lu, naa */
969 put_unaligned_be64(naa5_comp_b
+ dev_id_num
, arr
+ num
);
972 /* Target relative port number */
973 arr
[num
++] = 0x61; /* proto=sas, binary */
974 arr
[num
++] = 0x94; /* PIV=1, target port, rel port */
975 arr
[num
++] = 0x0; /* reserved */
976 arr
[num
++] = 0x4; /* length */
977 arr
[num
++] = 0x0; /* reserved */
978 arr
[num
++] = 0x0; /* reserved */
980 arr
[num
++] = 0x1; /* relative port A */
982 /* NAA-5, Target port identifier */
983 arr
[num
++] = 0x61; /* proto=sas, binary */
984 arr
[num
++] = 0x93; /* piv=1, target port, naa */
987 put_unaligned_be64(naa5_comp_a
+ port_a
, arr
+ num
);
989 /* NAA-5, Target port group identifier */
990 arr
[num
++] = 0x61; /* proto=sas, binary */
991 arr
[num
++] = 0x95; /* piv=1, target port group id */
996 put_unaligned_be16(port_group_id
, arr
+ num
);
998 /* NAA-5, Target device identifier */
999 arr
[num
++] = 0x61; /* proto=sas, binary */
1000 arr
[num
++] = 0xa3; /* piv=1, target device, naa */
1003 put_unaligned_be64(naa5_comp_a
+ target_dev_id
, arr
+ num
);
1005 /* SCSI name string: Target device identifier */
1006 arr
[num
++] = 0x63; /* proto=sas, UTF-8 */
1007 arr
[num
++] = 0xa8; /* piv=1, target device, SCSI name string */
1010 memcpy(arr
+ num
, "naa.52222220", 12);
1012 snprintf(b
, sizeof(b
), "%08X", target_dev_id
);
1013 memcpy(arr
+ num
, b
, 8);
1015 memset(arr
+ num
, 0, 4);
1020 static unsigned char vpd84_data
[] = {
1021 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1022 0x22,0x22,0x22,0x0,0xbb,0x1,
1023 0x22,0x22,0x22,0x0,0xbb,0x2,
1026 /* Software interface identification VPD page */
1027 static int inquiry_vpd_84(unsigned char *arr
)
1029 memcpy(arr
, vpd84_data
, sizeof(vpd84_data
));
1030 return sizeof(vpd84_data
);
1033 /* Management network addresses VPD page */
1034 static int inquiry_vpd_85(unsigned char *arr
)
1037 const char * na1
= "https://www.kernel.org/config";
1038 const char * na2
= "http://www.kernel.org/log";
1041 arr
[num
++] = 0x1; /* lu, storage config */
1042 arr
[num
++] = 0x0; /* reserved */
1047 plen
= ((plen
/ 4) + 1) * 4;
1048 arr
[num
++] = plen
; /* length, null termianted, padded */
1049 memcpy(arr
+ num
, na1
, olen
);
1050 memset(arr
+ num
+ olen
, 0, plen
- olen
);
1053 arr
[num
++] = 0x4; /* lu, logging */
1054 arr
[num
++] = 0x0; /* reserved */
1059 plen
= ((plen
/ 4) + 1) * 4;
1060 arr
[num
++] = plen
; /* length, null terminated, padded */
1061 memcpy(arr
+ num
, na2
, olen
);
1062 memset(arr
+ num
+ olen
, 0, plen
- olen
);
1068 /* SCSI ports VPD page */
1069 static int inquiry_vpd_88(unsigned char *arr
, int target_dev_id
)
1074 port_a
= target_dev_id
+ 1;
1075 port_b
= port_a
+ 1;
1076 arr
[num
++] = 0x0; /* reserved */
1077 arr
[num
++] = 0x0; /* reserved */
1079 arr
[num
++] = 0x1; /* relative port 1 (primary) */
1080 memset(arr
+ num
, 0, 6);
1083 arr
[num
++] = 12; /* length tp descriptor */
1084 /* naa-5 target port identifier (A) */
1085 arr
[num
++] = 0x61; /* proto=sas, binary */
1086 arr
[num
++] = 0x93; /* PIV=1, target port, NAA */
1087 arr
[num
++] = 0x0; /* reserved */
1088 arr
[num
++] = 0x8; /* length */
1089 put_unaligned_be64(naa5_comp_a
+ port_a
, arr
+ num
);
1091 arr
[num
++] = 0x0; /* reserved */
1092 arr
[num
++] = 0x0; /* reserved */
1094 arr
[num
++] = 0x2; /* relative port 2 (secondary) */
1095 memset(arr
+ num
, 0, 6);
1098 arr
[num
++] = 12; /* length tp descriptor */
1099 /* naa-5 target port identifier (B) */
1100 arr
[num
++] = 0x61; /* proto=sas, binary */
1101 arr
[num
++] = 0x93; /* PIV=1, target port, NAA */
1102 arr
[num
++] = 0x0; /* reserved */
1103 arr
[num
++] = 0x8; /* length */
1104 put_unaligned_be64(naa5_comp_a
+ port_b
, arr
+ num
);
1111 static unsigned char vpd89_data
[] = {
1112 /* from 4th byte */ 0,0,0,0,
1113 'l','i','n','u','x',' ',' ',' ',
1114 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1116 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1118 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1119 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1120 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1121 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1123 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1125 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1127 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1128 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1129 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1130 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1131 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1132 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1133 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1134 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1135 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1136 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1137 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1138 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1139 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1140 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1141 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1142 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1143 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1144 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1145 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1146 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1147 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1148 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1149 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1150 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1151 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1152 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1155 /* ATA Information VPD page */
1156 static int inquiry_vpd_89(unsigned char *arr
)
1158 memcpy(arr
, vpd89_data
, sizeof(vpd89_data
));
1159 return sizeof(vpd89_data
);
1163 static unsigned char vpdb0_data
[] = {
1164 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1165 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1166 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1167 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1170 /* Block limits VPD page (SBC-3) */
1171 static int inquiry_vpd_b0(unsigned char *arr
)
1175 memcpy(arr
, vpdb0_data
, sizeof(vpdb0_data
));
1177 /* Optimal transfer length granularity */
1178 gran
= 1 << sdebug_physblk_exp
;
1179 put_unaligned_be16(gran
, arr
+ 2);
1181 /* Maximum Transfer Length */
1182 if (sdebug_store_sectors
> 0x400)
1183 put_unaligned_be32(sdebug_store_sectors
, arr
+ 4);
1185 /* Optimal Transfer Length */
1186 put_unaligned_be32(sdebug_opt_blks
, &arr
[8]);
1189 /* Maximum Unmap LBA Count */
1190 put_unaligned_be32(sdebug_unmap_max_blocks
, &arr
[16]);
1192 /* Maximum Unmap Block Descriptor Count */
1193 put_unaligned_be32(sdebug_unmap_max_desc
, &arr
[20]);
1196 /* Unmap Granularity Alignment */
1197 if (sdebug_unmap_alignment
) {
1198 put_unaligned_be32(sdebug_unmap_alignment
, &arr
[28]);
1199 arr
[28] |= 0x80; /* UGAVALID */
1202 /* Optimal Unmap Granularity */
1203 put_unaligned_be32(sdebug_unmap_granularity
, &arr
[24]);
1205 /* Maximum WRITE SAME Length */
1206 put_unaligned_be64(sdebug_write_same_length
, &arr
[32]);
1208 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1210 return sizeof(vpdb0_data
);
1213 /* Block device characteristics VPD page (SBC-3) */
1214 static int inquiry_vpd_b1(unsigned char *arr
)
1216 memset(arr
, 0, 0x3c);
1218 arr
[1] = 1; /* non rotating medium (e.g. solid state) */
1220 arr
[3] = 5; /* less than 1.8" */
1225 /* Logical block provisioning VPD page (SBC-4) */
1226 static int inquiry_vpd_b2(unsigned char *arr
)
1228 memset(arr
, 0, 0x4);
1229 arr
[0] = 0; /* threshold exponent */
1236 if (sdebug_lbprz
&& scsi_debug_lbp())
1237 arr
[1] |= (sdebug_lbprz
& 0x7) << 2; /* sbc4r07 and later */
1238 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1239 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1240 /* threshold_percentage=0 */
1244 #define SDEBUG_LONG_INQ_SZ 96
1245 #define SDEBUG_MAX_INQ_ARR_SZ 584
1247 static int resp_inquiry(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
1249 unsigned char pq_pdt
;
1250 unsigned char * arr
;
1251 unsigned char *cmd
= scp
->cmnd
;
1252 int alloc_len
, n
, ret
;
1253 bool have_wlun
, is_disk
;
1255 alloc_len
= get_unaligned_be16(cmd
+ 3);
1256 arr
= kzalloc(SDEBUG_MAX_INQ_ARR_SZ
, GFP_ATOMIC
);
1258 return DID_REQUEUE
<< 16;
1259 is_disk
= (sdebug_ptype
== TYPE_DISK
);
1260 have_wlun
= scsi_is_wlun(scp
->device
->lun
);
1262 pq_pdt
= TYPE_WLUN
; /* present, wlun */
1263 else if (sdebug_no_lun_0
&& (devip
->lun
== SDEBUG_LUN_0_VAL
))
1264 pq_pdt
= 0x7f; /* not present, PQ=3, PDT=0x1f */
1266 pq_pdt
= (sdebug_ptype
& 0x1f);
1268 if (0x2 & cmd
[1]) { /* CMDDT bit set */
1269 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 1, 1);
1271 return check_condition_result
;
1272 } else if (0x1 & cmd
[1]) { /* EVPD bit set */
1273 int lu_id_num
, port_group_id
, target_dev_id
, len
;
1275 int host_no
= devip
->sdbg_host
->shost
->host_no
;
1277 port_group_id
= (((host_no
+ 1) & 0x7f) << 8) +
1278 (devip
->channel
& 0x7f);
1279 if (sdebug_vpd_use_hostno
== 0)
1281 lu_id_num
= have_wlun
? -1 : (((host_no
+ 1) * 2000) +
1282 (devip
->target
* 1000) + devip
->lun
);
1283 target_dev_id
= ((host_no
+ 1) * 2000) +
1284 (devip
->target
* 1000) - 3;
1285 len
= scnprintf(lu_id_str
, 6, "%d", lu_id_num
);
1286 if (0 == cmd
[2]) { /* supported vital product data pages */
1287 arr
[1] = cmd
[2]; /*sanity */
1289 arr
[n
++] = 0x0; /* this page */
1290 arr
[n
++] = 0x80; /* unit serial number */
1291 arr
[n
++] = 0x83; /* device identification */
1292 arr
[n
++] = 0x84; /* software interface ident. */
1293 arr
[n
++] = 0x85; /* management network addresses */
1294 arr
[n
++] = 0x86; /* extended inquiry */
1295 arr
[n
++] = 0x87; /* mode page policy */
1296 arr
[n
++] = 0x88; /* SCSI ports */
1297 if (is_disk
) { /* SBC only */
1298 arr
[n
++] = 0x89; /* ATA information */
1299 arr
[n
++] = 0xb0; /* Block limits */
1300 arr
[n
++] = 0xb1; /* Block characteristics */
1301 arr
[n
++] = 0xb2; /* Logical Block Prov */
1303 arr
[3] = n
- 4; /* number of supported VPD pages */
1304 } else if (0x80 == cmd
[2]) { /* unit serial number */
1305 arr
[1] = cmd
[2]; /*sanity */
1307 memcpy(&arr
[4], lu_id_str
, len
);
1308 } else if (0x83 == cmd
[2]) { /* device identification */
1309 arr
[1] = cmd
[2]; /*sanity */
1310 arr
[3] = inquiry_vpd_83(&arr
[4], port_group_id
,
1311 target_dev_id
, lu_id_num
,
1314 } else if (0x84 == cmd
[2]) { /* Software interface ident. */
1315 arr
[1] = cmd
[2]; /*sanity */
1316 arr
[3] = inquiry_vpd_84(&arr
[4]);
1317 } else if (0x85 == cmd
[2]) { /* Management network addresses */
1318 arr
[1] = cmd
[2]; /*sanity */
1319 arr
[3] = inquiry_vpd_85(&arr
[4]);
1320 } else if (0x86 == cmd
[2]) { /* extended inquiry */
1321 arr
[1] = cmd
[2]; /*sanity */
1322 arr
[3] = 0x3c; /* number of following entries */
1323 if (sdebug_dif
== SD_DIF_TYPE3_PROTECTION
)
1324 arr
[4] = 0x4; /* SPT: GRD_CHK:1 */
1325 else if (have_dif_prot
)
1326 arr
[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1328 arr
[4] = 0x0; /* no protection stuff */
1329 arr
[5] = 0x7; /* head of q, ordered + simple q's */
1330 } else if (0x87 == cmd
[2]) { /* mode page policy */
1331 arr
[1] = cmd
[2]; /*sanity */
1332 arr
[3] = 0x8; /* number of following entries */
1333 arr
[4] = 0x2; /* disconnect-reconnect mp */
1334 arr
[6] = 0x80; /* mlus, shared */
1335 arr
[8] = 0x18; /* protocol specific lu */
1336 arr
[10] = 0x82; /* mlus, per initiator port */
1337 } else if (0x88 == cmd
[2]) { /* SCSI Ports */
1338 arr
[1] = cmd
[2]; /*sanity */
1339 arr
[3] = inquiry_vpd_88(&arr
[4], target_dev_id
);
1340 } else if (is_disk
&& 0x89 == cmd
[2]) { /* ATA information */
1341 arr
[1] = cmd
[2]; /*sanity */
1342 n
= inquiry_vpd_89(&arr
[4]);
1343 put_unaligned_be16(n
, arr
+ 2);
1344 } else if (is_disk
&& 0xb0 == cmd
[2]) { /* Block limits */
1345 arr
[1] = cmd
[2]; /*sanity */
1346 arr
[3] = inquiry_vpd_b0(&arr
[4]);
1347 } else if (is_disk
&& 0xb1 == cmd
[2]) { /* Block char. */
1348 arr
[1] = cmd
[2]; /*sanity */
1349 arr
[3] = inquiry_vpd_b1(&arr
[4]);
1350 } else if (is_disk
&& 0xb2 == cmd
[2]) { /* LB Prov. */
1351 arr
[1] = cmd
[2]; /*sanity */
1352 arr
[3] = inquiry_vpd_b2(&arr
[4]);
1354 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, -1);
1356 return check_condition_result
;
1358 len
= min(get_unaligned_be16(arr
+ 2) + 4, alloc_len
);
1359 ret
= fill_from_dev_buffer(scp
, arr
,
1360 min(len
, SDEBUG_MAX_INQ_ARR_SZ
));
1364 /* drops through here for a standard inquiry */
1365 arr
[1] = sdebug_removable
? 0x80 : 0; /* Removable disk */
1366 arr
[2] = sdebug_scsi_level
;
1367 arr
[3] = 2; /* response_data_format==2 */
1368 arr
[4] = SDEBUG_LONG_INQ_SZ
- 5;
1369 arr
[5] = (int)have_dif_prot
; /* PROTECT bit */
1370 if (sdebug_vpd_use_hostno
== 0)
1371 arr
[5] = 0x10; /* claim: implicit TGPS */
1372 arr
[6] = 0x10; /* claim: MultiP */
1373 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1374 arr
[7] = 0xa; /* claim: LINKED + CMDQUE */
1375 memcpy(&arr
[8], inq_vendor_id
, 8);
1376 memcpy(&arr
[16], inq_product_id
, 16);
1377 memcpy(&arr
[32], inq_product_rev
, 4);
1378 /* version descriptors (2 bytes each) follow */
1379 put_unaligned_be16(0xc0, arr
+ 58); /* SAM-6 no version claimed */
1380 put_unaligned_be16(0x5c0, arr
+ 60); /* SPC-5 no version claimed */
1382 if (is_disk
) { /* SBC-4 no version claimed */
1383 put_unaligned_be16(0x600, arr
+ n
);
1385 } else if (sdebug_ptype
== TYPE_TAPE
) { /* SSC-4 rev 3 */
1386 put_unaligned_be16(0x525, arr
+ n
);
1389 put_unaligned_be16(0x2100, arr
+ n
); /* SPL-4 no version claimed */
1390 ret
= fill_from_dev_buffer(scp
, arr
,
1391 min(alloc_len
, SDEBUG_LONG_INQ_SZ
));
1396 static unsigned char iec_m_pg
[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1399 static int resp_requests(struct scsi_cmnd
* scp
,
1400 struct sdebug_dev_info
* devip
)
1402 unsigned char * sbuff
;
1403 unsigned char *cmd
= scp
->cmnd
;
1404 unsigned char arr
[SCSI_SENSE_BUFFERSIZE
];
1408 memset(arr
, 0, sizeof(arr
));
1409 dsense
= !!(cmd
[1] & 1);
1410 sbuff
= scp
->sense_buffer
;
1411 if ((iec_m_pg
[2] & 0x4) && (6 == (iec_m_pg
[3] & 0xf))) {
1414 arr
[1] = 0x0; /* NO_SENSE in sense_key */
1415 arr
[2] = THRESHOLD_EXCEEDED
;
1416 arr
[3] = 0xff; /* TEST set and MRIE==6 */
1420 arr
[2] = 0x0; /* NO_SENSE in sense_key */
1421 arr
[7] = 0xa; /* 18 byte sense buffer */
1422 arr
[12] = THRESHOLD_EXCEEDED
;
1423 arr
[13] = 0xff; /* TEST set and MRIE==6 */
1426 memcpy(arr
, sbuff
, SCSI_SENSE_BUFFERSIZE
);
1427 if (arr
[0] >= 0x70 && dsense
== sdebug_dsense
)
1428 ; /* have sense and formats match */
1429 else if (arr
[0] <= 0x70) {
1439 } else if (dsense
) {
1442 arr
[1] = sbuff
[2]; /* sense key */
1443 arr
[2] = sbuff
[12]; /* asc */
1444 arr
[3] = sbuff
[13]; /* ascq */
1456 mk_sense_buffer(scp
, 0, NO_ADDITIONAL_SENSE
, 0);
1457 return fill_from_dev_buffer(scp
, arr
, len
);
1460 static int resp_start_stop(struct scsi_cmnd
* scp
,
1461 struct sdebug_dev_info
* devip
)
1463 unsigned char *cmd
= scp
->cmnd
;
1464 int power_cond
, stop
;
1466 power_cond
= (cmd
[4] & 0xf0) >> 4;
1468 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 4, 7);
1469 return check_condition_result
;
1471 stop
= !(cmd
[4] & 1);
1472 atomic_xchg(&devip
->stopped
, stop
);
1476 static sector_t
get_sdebug_capacity(void)
1478 static const unsigned int gibibyte
= 1073741824;
1480 if (sdebug_virtual_gb
> 0)
1481 return (sector_t
)sdebug_virtual_gb
*
1482 (gibibyte
/ sdebug_sector_size
);
1484 return sdebug_store_sectors
;
1487 #define SDEBUG_READCAP_ARR_SZ 8
1488 static int resp_readcap(struct scsi_cmnd
* scp
,
1489 struct sdebug_dev_info
* devip
)
1491 unsigned char arr
[SDEBUG_READCAP_ARR_SZ
];
1494 /* following just in case virtual_gb changed */
1495 sdebug_capacity
= get_sdebug_capacity();
1496 memset(arr
, 0, SDEBUG_READCAP_ARR_SZ
);
1497 if (sdebug_capacity
< 0xffffffff) {
1498 capac
= (unsigned int)sdebug_capacity
- 1;
1499 put_unaligned_be32(capac
, arr
+ 0);
1501 put_unaligned_be32(0xffffffff, arr
+ 0);
1502 put_unaligned_be16(sdebug_sector_size
, arr
+ 6);
1503 return fill_from_dev_buffer(scp
, arr
, SDEBUG_READCAP_ARR_SZ
);
1506 #define SDEBUG_READCAP16_ARR_SZ 32
1507 static int resp_readcap16(struct scsi_cmnd
* scp
,
1508 struct sdebug_dev_info
* devip
)
1510 unsigned char *cmd
= scp
->cmnd
;
1511 unsigned char arr
[SDEBUG_READCAP16_ARR_SZ
];
1514 alloc_len
= get_unaligned_be32(cmd
+ 10);
1515 /* following just in case virtual_gb changed */
1516 sdebug_capacity
= get_sdebug_capacity();
1517 memset(arr
, 0, SDEBUG_READCAP16_ARR_SZ
);
1518 put_unaligned_be64((u64
)(sdebug_capacity
- 1), arr
+ 0);
1519 put_unaligned_be32(sdebug_sector_size
, arr
+ 8);
1520 arr
[13] = sdebug_physblk_exp
& 0xf;
1521 arr
[14] = (sdebug_lowest_aligned
>> 8) & 0x3f;
1523 if (scsi_debug_lbp()) {
1524 arr
[14] |= 0x80; /* LBPME */
1525 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1526 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1527 * in the wider field maps to 0 in this field.
1529 if (sdebug_lbprz
& 1) /* precisely what the draft requires */
1533 arr
[15] = sdebug_lowest_aligned
& 0xff;
1535 if (have_dif_prot
) {
1536 arr
[12] = (sdebug_dif
- 1) << 1; /* P_TYPE */
1537 arr
[12] |= 1; /* PROT_EN */
1540 return fill_from_dev_buffer(scp
, arr
,
1541 min(alloc_len
, SDEBUG_READCAP16_ARR_SZ
));
1544 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1546 static int resp_report_tgtpgs(struct scsi_cmnd
* scp
,
1547 struct sdebug_dev_info
* devip
)
1549 unsigned char *cmd
= scp
->cmnd
;
1550 unsigned char * arr
;
1551 int host_no
= devip
->sdbg_host
->shost
->host_no
;
1552 int n
, ret
, alen
, rlen
;
1553 int port_group_a
, port_group_b
, port_a
, port_b
;
1555 alen
= get_unaligned_be32(cmd
+ 6);
1556 arr
= kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ
, GFP_ATOMIC
);
1558 return DID_REQUEUE
<< 16;
1560 * EVPD page 0x88 states we have two ports, one
1561 * real and a fake port with no device connected.
1562 * So we create two port groups with one port each
1563 * and set the group with port B to unavailable.
1565 port_a
= 0x1; /* relative port A */
1566 port_b
= 0x2; /* relative port B */
1567 port_group_a
= (((host_no
+ 1) & 0x7f) << 8) +
1568 (devip
->channel
& 0x7f);
1569 port_group_b
= (((host_no
+ 1) & 0x7f) << 8) +
1570 (devip
->channel
& 0x7f) + 0x80;
1573 * The asymmetric access state is cycled according to the host_id.
1576 if (sdebug_vpd_use_hostno
== 0) {
1577 arr
[n
++] = host_no
% 3; /* Asymm access state */
1578 arr
[n
++] = 0x0F; /* claim: all states are supported */
1580 arr
[n
++] = 0x0; /* Active/Optimized path */
1581 arr
[n
++] = 0x01; /* only support active/optimized paths */
1583 put_unaligned_be16(port_group_a
, arr
+ n
);
1585 arr
[n
++] = 0; /* Reserved */
1586 arr
[n
++] = 0; /* Status code */
1587 arr
[n
++] = 0; /* Vendor unique */
1588 arr
[n
++] = 0x1; /* One port per group */
1589 arr
[n
++] = 0; /* Reserved */
1590 arr
[n
++] = 0; /* Reserved */
1591 put_unaligned_be16(port_a
, arr
+ n
);
1593 arr
[n
++] = 3; /* Port unavailable */
1594 arr
[n
++] = 0x08; /* claim: only unavailalbe paths are supported */
1595 put_unaligned_be16(port_group_b
, arr
+ n
);
1597 arr
[n
++] = 0; /* Reserved */
1598 arr
[n
++] = 0; /* Status code */
1599 arr
[n
++] = 0; /* Vendor unique */
1600 arr
[n
++] = 0x1; /* One port per group */
1601 arr
[n
++] = 0; /* Reserved */
1602 arr
[n
++] = 0; /* Reserved */
1603 put_unaligned_be16(port_b
, arr
+ n
);
1607 put_unaligned_be32(rlen
, arr
+ 0);
1610 * Return the smallest value of either
1611 * - The allocated length
1612 * - The constructed command length
1613 * - The maximum array size
1616 ret
= fill_from_dev_buffer(scp
, arr
,
1617 min(rlen
, SDEBUG_MAX_TGTPGS_ARR_SZ
));
1622 static int resp_rsup_opcodes(struct scsi_cmnd
*scp
,
1623 struct sdebug_dev_info
*devip
)
1626 u8 reporting_opts
, req_opcode
, sdeb_i
, supp
;
1628 u32 alloc_len
, a_len
;
1629 int k
, offset
, len
, errsts
, count
, bump
, na
;
1630 const struct opcode_info_t
*oip
;
1631 const struct opcode_info_t
*r_oip
;
1633 u8
*cmd
= scp
->cmnd
;
1635 rctd
= !!(cmd
[2] & 0x80);
1636 reporting_opts
= cmd
[2] & 0x7;
1637 req_opcode
= cmd
[3];
1638 req_sa
= get_unaligned_be16(cmd
+ 4);
1639 alloc_len
= get_unaligned_be32(cmd
+ 6);
1640 if (alloc_len
< 4 || alloc_len
> 0xffff) {
1641 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 6, -1);
1642 return check_condition_result
;
1644 if (alloc_len
> 8192)
1648 arr
= kzalloc((a_len
< 256) ? 320 : a_len
+ 64, GFP_ATOMIC
);
1650 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
1652 return check_condition_result
;
1654 switch (reporting_opts
) {
1655 case 0: /* all commands */
1656 /* count number of commands */
1657 for (count
= 0, oip
= opcode_info_arr
;
1658 oip
->num_attached
!= 0xff; ++oip
) {
1659 if (F_INV_OP
& oip
->flags
)
1661 count
+= (oip
->num_attached
+ 1);
1663 bump
= rctd
? 20 : 8;
1664 put_unaligned_be32(count
* bump
, arr
);
1665 for (offset
= 4, oip
= opcode_info_arr
;
1666 oip
->num_attached
!= 0xff && offset
< a_len
; ++oip
) {
1667 if (F_INV_OP
& oip
->flags
)
1669 na
= oip
->num_attached
;
1670 arr
[offset
] = oip
->opcode
;
1671 put_unaligned_be16(oip
->sa
, arr
+ offset
+ 2);
1673 arr
[offset
+ 5] |= 0x2;
1674 if (FF_SA
& oip
->flags
)
1675 arr
[offset
+ 5] |= 0x1;
1676 put_unaligned_be16(oip
->len_mask
[0], arr
+ offset
+ 6);
1678 put_unaligned_be16(0xa, arr
+ offset
+ 8);
1680 for (k
= 0, oip
= oip
->arrp
; k
< na
; ++k
, ++oip
) {
1681 if (F_INV_OP
& oip
->flags
)
1684 arr
[offset
] = oip
->opcode
;
1685 put_unaligned_be16(oip
->sa
, arr
+ offset
+ 2);
1687 arr
[offset
+ 5] |= 0x2;
1688 if (FF_SA
& oip
->flags
)
1689 arr
[offset
+ 5] |= 0x1;
1690 put_unaligned_be16(oip
->len_mask
[0],
1693 put_unaligned_be16(0xa,
1700 case 1: /* one command: opcode only */
1701 case 2: /* one command: opcode plus service action */
1702 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1703 sdeb_i
= opcode_ind_arr
[req_opcode
];
1704 oip
= &opcode_info_arr
[sdeb_i
];
1705 if (F_INV_OP
& oip
->flags
) {
1709 if (1 == reporting_opts
) {
1710 if (FF_SA
& oip
->flags
) {
1711 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
,
1714 return check_condition_result
;
1717 } else if (2 == reporting_opts
&&
1718 0 == (FF_SA
& oip
->flags
)) {
1719 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 4, -1);
1720 kfree(arr
); /* point at requested sa */
1721 return check_condition_result
;
1723 if (0 == (FF_SA
& oip
->flags
) &&
1724 req_opcode
== oip
->opcode
)
1726 else if (0 == (FF_SA
& oip
->flags
)) {
1727 na
= oip
->num_attached
;
1728 for (k
= 0, oip
= oip
->arrp
; k
< na
;
1730 if (req_opcode
== oip
->opcode
)
1733 supp
= (k
>= na
) ? 1 : 3;
1734 } else if (req_sa
!= oip
->sa
) {
1735 na
= oip
->num_attached
;
1736 for (k
= 0, oip
= oip
->arrp
; k
< na
;
1738 if (req_sa
== oip
->sa
)
1741 supp
= (k
>= na
) ? 1 : 3;
1745 u
= oip
->len_mask
[0];
1746 put_unaligned_be16(u
, arr
+ 2);
1747 arr
[4] = oip
->opcode
;
1748 for (k
= 1; k
< u
; ++k
)
1749 arr
[4 + k
] = (k
< 16) ?
1750 oip
->len_mask
[k
] : 0xff;
1755 arr
[1] = (rctd
? 0x80 : 0) | supp
;
1757 put_unaligned_be16(0xa, arr
+ offset
);
1762 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, 2);
1764 return check_condition_result
;
1766 offset
= (offset
< a_len
) ? offset
: a_len
;
1767 len
= (offset
< alloc_len
) ? offset
: alloc_len
;
1768 errsts
= fill_from_dev_buffer(scp
, arr
, len
);
1773 static int resp_rsup_tmfs(struct scsi_cmnd
*scp
,
1774 struct sdebug_dev_info
*devip
)
1779 u8
*cmd
= scp
->cmnd
;
1781 memset(arr
, 0, sizeof(arr
));
1782 repd
= !!(cmd
[2] & 0x80);
1783 alloc_len
= get_unaligned_be32(cmd
+ 6);
1784 if (alloc_len
< 4) {
1785 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 6, -1);
1786 return check_condition_result
;
1788 arr
[0] = 0xc8; /* ATS | ATSS | LURS */
1789 arr
[1] = 0x1; /* ITNRS */
1796 len
= (len
< alloc_len
) ? len
: alloc_len
;
1797 return fill_from_dev_buffer(scp
, arr
, len
);
1800 /* <<Following mode page info copied from ST318451LW>> */
1802 static int resp_err_recov_pg(unsigned char * p
, int pcontrol
, int target
)
1803 { /* Read-Write Error Recovery page for mode_sense */
1804 unsigned char err_recov_pg
[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1807 memcpy(p
, err_recov_pg
, sizeof(err_recov_pg
));
1809 memset(p
+ 2, 0, sizeof(err_recov_pg
) - 2);
1810 return sizeof(err_recov_pg
);
1813 static int resp_disconnect_pg(unsigned char * p
, int pcontrol
, int target
)
1814 { /* Disconnect-Reconnect page for mode_sense */
1815 unsigned char disconnect_pg
[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1816 0, 0, 0, 0, 0, 0, 0, 0};
1818 memcpy(p
, disconnect_pg
, sizeof(disconnect_pg
));
1820 memset(p
+ 2, 0, sizeof(disconnect_pg
) - 2);
1821 return sizeof(disconnect_pg
);
1824 static int resp_format_pg(unsigned char * p
, int pcontrol
, int target
)
1825 { /* Format device page for mode_sense */
1826 unsigned char format_pg
[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1827 0, 0, 0, 0, 0, 0, 0, 0,
1828 0, 0, 0, 0, 0x40, 0, 0, 0};
1830 memcpy(p
, format_pg
, sizeof(format_pg
));
1831 put_unaligned_be16(sdebug_sectors_per
, p
+ 10);
1832 put_unaligned_be16(sdebug_sector_size
, p
+ 12);
1833 if (sdebug_removable
)
1834 p
[20] |= 0x20; /* should agree with INQUIRY */
1836 memset(p
+ 2, 0, sizeof(format_pg
) - 2);
1837 return sizeof(format_pg
);
1840 static unsigned char caching_pg
[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1841 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
1844 static int resp_caching_pg(unsigned char * p
, int pcontrol
, int target
)
1845 { /* Caching page for mode_sense */
1846 unsigned char ch_caching_pg
[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1847 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1848 unsigned char d_caching_pg
[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1849 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1851 if (SDEBUG_OPT_N_WCE
& sdebug_opts
)
1852 caching_pg
[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
1853 memcpy(p
, caching_pg
, sizeof(caching_pg
));
1855 memcpy(p
+ 2, ch_caching_pg
, sizeof(ch_caching_pg
));
1856 else if (2 == pcontrol
)
1857 memcpy(p
, d_caching_pg
, sizeof(d_caching_pg
));
1858 return sizeof(caching_pg
);
1861 static unsigned char ctrl_m_pg
[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1864 static int resp_ctrl_m_pg(unsigned char * p
, int pcontrol
, int target
)
1865 { /* Control mode page for mode_sense */
1866 unsigned char ch_ctrl_m_pg
[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1868 unsigned char d_ctrl_m_pg
[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1872 ctrl_m_pg
[2] |= 0x4;
1874 ctrl_m_pg
[2] &= ~0x4;
1877 ctrl_m_pg
[5] |= 0x80; /* ATO=1 */
1879 memcpy(p
, ctrl_m_pg
, sizeof(ctrl_m_pg
));
1881 memcpy(p
+ 2, ch_ctrl_m_pg
, sizeof(ch_ctrl_m_pg
));
1882 else if (2 == pcontrol
)
1883 memcpy(p
, d_ctrl_m_pg
, sizeof(d_ctrl_m_pg
));
1884 return sizeof(ctrl_m_pg
);
1888 static int resp_iec_m_pg(unsigned char * p
, int pcontrol
, int target
)
1889 { /* Informational Exceptions control mode page for mode_sense */
1890 unsigned char ch_iec_m_pg
[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1892 unsigned char d_iec_m_pg
[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1895 memcpy(p
, iec_m_pg
, sizeof(iec_m_pg
));
1897 memcpy(p
+ 2, ch_iec_m_pg
, sizeof(ch_iec_m_pg
));
1898 else if (2 == pcontrol
)
1899 memcpy(p
, d_iec_m_pg
, sizeof(d_iec_m_pg
));
1900 return sizeof(iec_m_pg
);
1903 static int resp_sas_sf_m_pg(unsigned char * p
, int pcontrol
, int target
)
1904 { /* SAS SSP mode page - short format for mode_sense */
1905 unsigned char sas_sf_m_pg
[] = {0x19, 0x6,
1906 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1908 memcpy(p
, sas_sf_m_pg
, sizeof(sas_sf_m_pg
));
1910 memset(p
+ 2, 0, sizeof(sas_sf_m_pg
) - 2);
1911 return sizeof(sas_sf_m_pg
);
1915 static int resp_sas_pcd_m_spg(unsigned char * p
, int pcontrol
, int target
,
1917 { /* SAS phy control and discover mode page for mode_sense */
1918 unsigned char sas_pcd_m_pg
[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1919 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1920 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
1921 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
1922 0x2, 0, 0, 0, 0, 0, 0, 0,
1923 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1924 0, 0, 0, 0, 0, 0, 0, 0,
1925 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1926 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
1927 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
1928 0x3, 0, 0, 0, 0, 0, 0, 0,
1929 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1930 0, 0, 0, 0, 0, 0, 0, 0,
1934 put_unaligned_be64(naa5_comp_a
, sas_pcd_m_pg
+ 16);
1935 put_unaligned_be64(naa5_comp_c
+ 1, sas_pcd_m_pg
+ 24);
1936 put_unaligned_be64(naa5_comp_a
, sas_pcd_m_pg
+ 64);
1937 put_unaligned_be64(naa5_comp_c
+ 1, sas_pcd_m_pg
+ 72);
1938 port_a
= target_dev_id
+ 1;
1939 port_b
= port_a
+ 1;
1940 memcpy(p
, sas_pcd_m_pg
, sizeof(sas_pcd_m_pg
));
1941 put_unaligned_be32(port_a
, p
+ 20);
1942 put_unaligned_be32(port_b
, p
+ 48 + 20);
1944 memset(p
+ 4, 0, sizeof(sas_pcd_m_pg
) - 4);
1945 return sizeof(sas_pcd_m_pg
);
1948 static int resp_sas_sha_m_spg(unsigned char * p
, int pcontrol
)
1949 { /* SAS SSP shared protocol specific port mode subpage */
1950 unsigned char sas_sha_m_pg
[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1951 0, 0, 0, 0, 0, 0, 0, 0,
1954 memcpy(p
, sas_sha_m_pg
, sizeof(sas_sha_m_pg
));
1956 memset(p
+ 4, 0, sizeof(sas_sha_m_pg
) - 4);
1957 return sizeof(sas_sha_m_pg
);
1960 #define SDEBUG_MAX_MSENSE_SZ 256
1962 static int resp_mode_sense(struct scsi_cmnd
*scp
,
1963 struct sdebug_dev_info
*devip
)
1965 int pcontrol
, pcode
, subpcode
, bd_len
;
1966 unsigned char dev_spec
;
1967 int alloc_len
, offset
, len
, target_dev_id
;
1968 int target
= scp
->device
->id
;
1970 unsigned char arr
[SDEBUG_MAX_MSENSE_SZ
];
1971 unsigned char *cmd
= scp
->cmnd
;
1972 bool dbd
, llbaa
, msense_6
, is_disk
, bad_pcode
;
1974 dbd
= !!(cmd
[1] & 0x8); /* disable block descriptors */
1975 pcontrol
= (cmd
[2] & 0xc0) >> 6;
1976 pcode
= cmd
[2] & 0x3f;
1978 msense_6
= (MODE_SENSE
== cmd
[0]);
1979 llbaa
= msense_6
? false : !!(cmd
[1] & 0x10);
1980 is_disk
= (sdebug_ptype
== TYPE_DISK
);
1981 if (is_disk
&& !dbd
)
1982 bd_len
= llbaa
? 16 : 8;
1985 alloc_len
= msense_6
? cmd
[4] : get_unaligned_be16(cmd
+ 7);
1986 memset(arr
, 0, SDEBUG_MAX_MSENSE_SZ
);
1987 if (0x3 == pcontrol
) { /* Saving values not supported */
1988 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, SAVING_PARAMS_UNSUP
, 0);
1989 return check_condition_result
;
1991 target_dev_id
= ((devip
->sdbg_host
->shost
->host_no
+ 1) * 2000) +
1992 (devip
->target
* 1000) - 3;
1993 /* for disks set DPOFUA bit and clear write protect (WP) bit */
1995 dev_spec
= 0x10; /* =0x90 if WP=1 implies read-only */
2005 arr
[4] = 0x1; /* set LONGLBA bit */
2006 arr
[7] = bd_len
; /* assume 255 or less */
2010 if ((bd_len
> 0) && (!sdebug_capacity
))
2011 sdebug_capacity
= get_sdebug_capacity();
2014 if (sdebug_capacity
> 0xfffffffe)
2015 put_unaligned_be32(0xffffffff, ap
+ 0);
2017 put_unaligned_be32(sdebug_capacity
, ap
+ 0);
2018 put_unaligned_be16(sdebug_sector_size
, ap
+ 6);
2021 } else if (16 == bd_len
) {
2022 put_unaligned_be64((u64
)sdebug_capacity
, ap
+ 0);
2023 put_unaligned_be32(sdebug_sector_size
, ap
+ 12);
2028 if ((subpcode
> 0x0) && (subpcode
< 0xff) && (0x19 != pcode
)) {
2029 /* TODO: Control Extension page */
2030 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 3, -1);
2031 return check_condition_result
;
2036 case 0x1: /* Read-Write error recovery page, direct access */
2037 len
= resp_err_recov_pg(ap
, pcontrol
, target
);
2040 case 0x2: /* Disconnect-Reconnect page, all devices */
2041 len
= resp_disconnect_pg(ap
, pcontrol
, target
);
2044 case 0x3: /* Format device page, direct access */
2046 len
= resp_format_pg(ap
, pcontrol
, target
);
2051 case 0x8: /* Caching page, direct access */
2053 len
= resp_caching_pg(ap
, pcontrol
, target
);
2058 case 0xa: /* Control Mode page, all devices */
2059 len
= resp_ctrl_m_pg(ap
, pcontrol
, target
);
2062 case 0x19: /* if spc==1 then sas phy, control+discover */
2063 if ((subpcode
> 0x2) && (subpcode
< 0xff)) {
2064 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 3, -1);
2065 return check_condition_result
;
2068 if ((0x0 == subpcode
) || (0xff == subpcode
))
2069 len
+= resp_sas_sf_m_pg(ap
+ len
, pcontrol
, target
);
2070 if ((0x1 == subpcode
) || (0xff == subpcode
))
2071 len
+= resp_sas_pcd_m_spg(ap
+ len
, pcontrol
, target
,
2073 if ((0x2 == subpcode
) || (0xff == subpcode
))
2074 len
+= resp_sas_sha_m_spg(ap
+ len
, pcontrol
);
2077 case 0x1c: /* Informational Exceptions Mode page, all devices */
2078 len
= resp_iec_m_pg(ap
, pcontrol
, target
);
2081 case 0x3f: /* Read all Mode pages */
2082 if ((0 == subpcode
) || (0xff == subpcode
)) {
2083 len
= resp_err_recov_pg(ap
, pcontrol
, target
);
2084 len
+= resp_disconnect_pg(ap
+ len
, pcontrol
, target
);
2086 len
+= resp_format_pg(ap
+ len
, pcontrol
,
2088 len
+= resp_caching_pg(ap
+ len
, pcontrol
,
2091 len
+= resp_ctrl_m_pg(ap
+ len
, pcontrol
, target
);
2092 len
+= resp_sas_sf_m_pg(ap
+ len
, pcontrol
, target
);
2093 if (0xff == subpcode
) {
2094 len
+= resp_sas_pcd_m_spg(ap
+ len
, pcontrol
,
2095 target
, target_dev_id
);
2096 len
+= resp_sas_sha_m_spg(ap
+ len
, pcontrol
);
2098 len
+= resp_iec_m_pg(ap
+ len
, pcontrol
, target
);
2101 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 3, -1);
2102 return check_condition_result
;
2110 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, 5);
2111 return check_condition_result
;
2114 arr
[0] = offset
- 1;
2116 put_unaligned_be16((offset
- 2), arr
+ 0);
2117 return fill_from_dev_buffer(scp
, arr
, min(alloc_len
, offset
));
2120 #define SDEBUG_MAX_MSELECT_SZ 512
2122 static int resp_mode_select(struct scsi_cmnd
*scp
,
2123 struct sdebug_dev_info
*devip
)
2125 int pf
, sp
, ps
, md_len
, bd_len
, off
, spf
, pg_len
;
2126 int param_len
, res
, mpage
;
2127 unsigned char arr
[SDEBUG_MAX_MSELECT_SZ
];
2128 unsigned char *cmd
= scp
->cmnd
;
2129 int mselect6
= (MODE_SELECT
== cmd
[0]);
2131 memset(arr
, 0, sizeof(arr
));
2134 param_len
= mselect6
? cmd
[4] : get_unaligned_be16(cmd
+ 7);
2135 if ((0 == pf
) || sp
|| (param_len
> SDEBUG_MAX_MSELECT_SZ
)) {
2136 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, mselect6
? 4 : 7, -1);
2137 return check_condition_result
;
2139 res
= fetch_to_dev_buffer(scp
, arr
, param_len
);
2141 return DID_ERROR
<< 16;
2142 else if (sdebug_verbose
&& (res
< param_len
))
2143 sdev_printk(KERN_INFO
, scp
->device
,
2144 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2145 __func__
, param_len
, res
);
2146 md_len
= mselect6
? (arr
[0] + 1) : (get_unaligned_be16(arr
+ 0) + 2);
2147 bd_len
= mselect6
? arr
[3] : get_unaligned_be16(arr
+ 6);
2149 mk_sense_invalid_fld(scp
, SDEB_IN_DATA
, 0, -1);
2150 return check_condition_result
;
2152 off
= bd_len
+ (mselect6
? 4 : 8);
2153 mpage
= arr
[off
] & 0x3f;
2154 ps
= !!(arr
[off
] & 0x80);
2156 mk_sense_invalid_fld(scp
, SDEB_IN_DATA
, off
, 7);
2157 return check_condition_result
;
2159 spf
= !!(arr
[off
] & 0x40);
2160 pg_len
= spf
? (get_unaligned_be16(arr
+ off
+ 2) + 4) :
2162 if ((pg_len
+ off
) > param_len
) {
2163 mk_sense_buffer(scp
, ILLEGAL_REQUEST
,
2164 PARAMETER_LIST_LENGTH_ERR
, 0);
2165 return check_condition_result
;
2168 case 0x8: /* Caching Mode page */
2169 if (caching_pg
[1] == arr
[off
+ 1]) {
2170 memcpy(caching_pg
+ 2, arr
+ off
+ 2,
2171 sizeof(caching_pg
) - 2);
2172 goto set_mode_changed_ua
;
2175 case 0xa: /* Control Mode page */
2176 if (ctrl_m_pg
[1] == arr
[off
+ 1]) {
2177 memcpy(ctrl_m_pg
+ 2, arr
+ off
+ 2,
2178 sizeof(ctrl_m_pg
) - 2);
2179 sdebug_dsense
= !!(ctrl_m_pg
[2] & 0x4);
2180 goto set_mode_changed_ua
;
2183 case 0x1c: /* Informational Exceptions Mode page */
2184 if (iec_m_pg
[1] == arr
[off
+ 1]) {
2185 memcpy(iec_m_pg
+ 2, arr
+ off
+ 2,
2186 sizeof(iec_m_pg
) - 2);
2187 goto set_mode_changed_ua
;
2193 mk_sense_invalid_fld(scp
, SDEB_IN_DATA
, off
, 5);
2194 return check_condition_result
;
2195 set_mode_changed_ua
:
2196 set_bit(SDEBUG_UA_MODE_CHANGED
, devip
->uas_bm
);
/* Temperature log page (0xd): two fixed parameters — current temperature
 * (38 C) and reference temperature (65 C). Copies the page body into
 * @arr and returns its length. */
static int resp_temp_l_pg(unsigned char *arr)
{
	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
		};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2210 static int resp_ie_l_pg(unsigned char * arr
)
2212 unsigned char ie_l_pg
[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2215 memcpy(arr
, ie_l_pg
, sizeof(ie_l_pg
));
2216 if (iec_m_pg
[2] & 0x4) { /* TEST bit set */
2217 arr
[4] = THRESHOLD_EXCEEDED
;
2220 return sizeof(ie_l_pg
);
2223 #define SDEBUG_MAX_LSENSE_SZ 512
2225 static int resp_log_sense(struct scsi_cmnd
* scp
,
2226 struct sdebug_dev_info
* devip
)
2228 int ppc
, sp
, pcontrol
, pcode
, subpcode
, alloc_len
, len
, n
;
2229 unsigned char arr
[SDEBUG_MAX_LSENSE_SZ
];
2230 unsigned char *cmd
= scp
->cmnd
;
2232 memset(arr
, 0, sizeof(arr
));
2236 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 1, ppc
? 1 : 0);
2237 return check_condition_result
;
2239 pcontrol
= (cmd
[2] & 0xc0) >> 6;
2240 pcode
= cmd
[2] & 0x3f;
2241 subpcode
= cmd
[3] & 0xff;
2242 alloc_len
= get_unaligned_be16(cmd
+ 7);
2244 if (0 == subpcode
) {
2246 case 0x0: /* Supported log pages log page */
2248 arr
[n
++] = 0x0; /* this page */
2249 arr
[n
++] = 0xd; /* Temperature */
2250 arr
[n
++] = 0x2f; /* Informational exceptions */
2253 case 0xd: /* Temperature log page */
2254 arr
[3] = resp_temp_l_pg(arr
+ 4);
2256 case 0x2f: /* Informational exceptions log page */
2257 arr
[3] = resp_ie_l_pg(arr
+ 4);
2260 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, 5);
2261 return check_condition_result
;
2263 } else if (0xff == subpcode
) {
2267 case 0x0: /* Supported log pages and subpages log page */
2270 arr
[n
++] = 0x0; /* 0,0 page */
2272 arr
[n
++] = 0xff; /* this page */
2274 arr
[n
++] = 0x0; /* Temperature */
2276 arr
[n
++] = 0x0; /* Informational exceptions */
2279 case 0xd: /* Temperature subpages */
2282 arr
[n
++] = 0x0; /* Temperature */
2285 case 0x2f: /* Informational exceptions subpages */
2288 arr
[n
++] = 0x0; /* Informational exceptions */
2292 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, 5);
2293 return check_condition_result
;
2296 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 3, -1);
2297 return check_condition_result
;
2299 len
= min(get_unaligned_be16(arr
+ 2) + 4, alloc_len
);
2300 return fill_from_dev_buffer(scp
, arr
,
2301 min(len
, SDEBUG_MAX_INQ_ARR_SZ
));
2304 static int check_device_access_params(struct scsi_cmnd
*scp
,
2305 unsigned long long lba
, unsigned int num
)
2307 if (lba
+ num
> sdebug_capacity
) {
2308 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
2309 return check_condition_result
;
2311 /* transfer length excessive (tie in to block limits VPD page) */
2312 if (num
> sdebug_store_sectors
) {
2313 /* needs work to find which cdb byte 'num' comes from */
2314 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
2315 return check_condition_result
;
2320 /* Returns number of bytes copied or -1 if error. */
2321 static int do_device_access(struct scsi_cmnd
*scmd
, u64 lba
, u32 num
,
2325 u64 block
, rest
= 0;
2326 struct scsi_data_buffer
*sdb
;
2327 enum dma_data_direction dir
;
2330 sdb
= scsi_out(scmd
);
2331 dir
= DMA_TO_DEVICE
;
2333 sdb
= scsi_in(scmd
);
2334 dir
= DMA_FROM_DEVICE
;
2339 if (!(scsi_bidi_cmnd(scmd
) || scmd
->sc_data_direction
== dir
))
2342 block
= do_div(lba
, sdebug_store_sectors
);
2343 if (block
+ num
> sdebug_store_sectors
)
2344 rest
= block
+ num
- sdebug_store_sectors
;
2346 ret
= sg_copy_buffer(sdb
->table
.sgl
, sdb
->table
.nents
,
2347 fake_storep
+ (block
* sdebug_sector_size
),
2348 (num
- rest
) * sdebug_sector_size
, 0, do_write
);
2349 if (ret
!= (num
- rest
) * sdebug_sector_size
)
2353 ret
+= sg_copy_buffer(sdb
->table
.sgl
, sdb
->table
.nents
,
2354 fake_storep
, rest
* sdebug_sector_size
,
2355 (num
- rest
) * sdebug_sector_size
, do_write
);
2361 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2362 * arr into fake_store(lba,num) and return true. If comparison fails then
2364 static bool comp_write_worker(u64 lba
, u32 num
, const u8
*arr
)
2367 u64 block
, rest
= 0;
2368 u32 store_blks
= sdebug_store_sectors
;
2369 u32 lb_size
= sdebug_sector_size
;
2371 block
= do_div(lba
, store_blks
);
2372 if (block
+ num
> store_blks
)
2373 rest
= block
+ num
- store_blks
;
2375 res
= !memcmp(fake_storep
+ (block
* lb_size
), arr
,
2376 (num
- rest
) * lb_size
);
2380 res
= memcmp(fake_storep
, arr
+ ((num
- rest
) * lb_size
),
2384 arr
+= num
* lb_size
;
2385 memcpy(fake_storep
+ (block
* lb_size
), arr
, (num
- rest
) * lb_size
);
2387 memcpy(fake_storep
, arr
+ ((num
- rest
) * lb_size
),
2392 static __be16
dif_compute_csum(const void *buf
, int len
)
2397 csum
= (__force __be16
)ip_compute_csum(buf
, len
);
2399 csum
= cpu_to_be16(crc_t10dif(buf
, len
));
2404 static int dif_verify(struct sd_dif_tuple
*sdt
, const void *data
,
2405 sector_t sector
, u32 ei_lba
)
2407 __be16 csum
= dif_compute_csum(data
, sdebug_sector_size
);
2409 if (sdt
->guard_tag
!= csum
) {
2410 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2411 (unsigned long)sector
,
2412 be16_to_cpu(sdt
->guard_tag
),
2416 if (sdebug_dif
== SD_DIF_TYPE1_PROTECTION
&&
2417 be32_to_cpu(sdt
->ref_tag
) != (sector
& 0xffffffff)) {
2418 pr_err("REF check failed on sector %lu\n",
2419 (unsigned long)sector
);
2422 if (sdebug_dif
== SD_DIF_TYPE2_PROTECTION
&&
2423 be32_to_cpu(sdt
->ref_tag
) != ei_lba
) {
2424 pr_err("REF check failed on sector %lu\n",
2425 (unsigned long)sector
);
2431 static void dif_copy_prot(struct scsi_cmnd
*SCpnt
, sector_t sector
,
2432 unsigned int sectors
, bool read
)
2436 const void *dif_store_end
= dif_storep
+ sdebug_store_sectors
;
2437 struct sg_mapping_iter miter
;
2439 /* Bytes of protection data to copy into sgl */
2440 resid
= sectors
* sizeof(*dif_storep
);
2442 sg_miter_start(&miter
, scsi_prot_sglist(SCpnt
),
2443 scsi_prot_sg_count(SCpnt
), SG_MITER_ATOMIC
|
2444 (read
? SG_MITER_TO_SG
: SG_MITER_FROM_SG
));
2446 while (sg_miter_next(&miter
) && resid
> 0) {
2447 size_t len
= min(miter
.length
, resid
);
2448 void *start
= dif_store(sector
);
2451 if (dif_store_end
< start
+ len
)
2452 rest
= start
+ len
- dif_store_end
;
2457 memcpy(paddr
, start
, len
- rest
);
2459 memcpy(start
, paddr
, len
- rest
);
2463 memcpy(paddr
+ len
- rest
, dif_storep
, rest
);
2465 memcpy(dif_storep
, paddr
+ len
- rest
, rest
);
2468 sector
+= len
/ sizeof(*dif_storep
);
2471 sg_miter_stop(&miter
);
2474 static int prot_verify_read(struct scsi_cmnd
*SCpnt
, sector_t start_sec
,
2475 unsigned int sectors
, u32 ei_lba
)
2478 struct sd_dif_tuple
*sdt
;
2481 for (i
= 0; i
< sectors
; i
++, ei_lba
++) {
2484 sector
= start_sec
+ i
;
2485 sdt
= dif_store(sector
);
2487 if (sdt
->app_tag
== cpu_to_be16(0xffff))
2490 ret
= dif_verify(sdt
, fake_store(sector
), sector
, ei_lba
);
2497 dif_copy_prot(SCpnt
, start_sec
, sectors
, true);
2503 static int resp_read_dt0(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
2505 u8
*cmd
= scp
->cmnd
;
2506 struct sdebug_queued_cmd
*sqcp
;
2510 unsigned long iflags
;
2517 lba
= get_unaligned_be64(cmd
+ 2);
2518 num
= get_unaligned_be32(cmd
+ 10);
2523 lba
= get_unaligned_be32(cmd
+ 2);
2524 num
= get_unaligned_be16(cmd
+ 7);
2529 lba
= (u32
)cmd
[3] | (u32
)cmd
[2] << 8 |
2530 (u32
)(cmd
[1] & 0x1f) << 16;
2531 num
= (0 == cmd
[4]) ? 256 : cmd
[4];
2536 lba
= get_unaligned_be32(cmd
+ 2);
2537 num
= get_unaligned_be32(cmd
+ 6);
2540 case XDWRITEREAD_10
:
2542 lba
= get_unaligned_be32(cmd
+ 2);
2543 num
= get_unaligned_be16(cmd
+ 7);
2546 default: /* assume READ(32) */
2547 lba
= get_unaligned_be64(cmd
+ 12);
2548 ei_lba
= get_unaligned_be32(cmd
+ 20);
2549 num
= get_unaligned_be32(cmd
+ 28);
2553 if (unlikely(have_dif_prot
&& check_prot
)) {
2554 if (sdebug_dif
== SD_DIF_TYPE2_PROTECTION
&&
2556 mk_sense_invalid_opcode(scp
);
2557 return check_condition_result
;
2559 if ((sdebug_dif
== SD_DIF_TYPE1_PROTECTION
||
2560 sdebug_dif
== SD_DIF_TYPE3_PROTECTION
) &&
2561 (cmd
[1] & 0xe0) == 0)
2562 sdev_printk(KERN_ERR
, scp
->device
, "Unprotected RD "
2565 if (unlikely(sdebug_any_injecting_opt
)) {
2566 sqcp
= (struct sdebug_queued_cmd
*)scp
->host_scribble
;
2569 if (sqcp
->inj_short
)
2575 /* inline check_device_access_params() */
2576 if (unlikely(lba
+ num
> sdebug_capacity
)) {
2577 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
2578 return check_condition_result
;
2580 /* transfer length excessive (tie in to block limits VPD page) */
2581 if (unlikely(num
> sdebug_store_sectors
)) {
2582 /* needs work to find which cdb byte 'num' comes from */
2583 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
2584 return check_condition_result
;
2587 if (unlikely((SDEBUG_OPT_MEDIUM_ERR
& sdebug_opts
) &&
2588 (lba
<= (OPT_MEDIUM_ERR_ADDR
+ OPT_MEDIUM_ERR_NUM
- 1)) &&
2589 ((lba
+ num
) > OPT_MEDIUM_ERR_ADDR
))) {
2590 /* claim unrecoverable read error */
2591 mk_sense_buffer(scp
, MEDIUM_ERROR
, UNRECOVERED_READ_ERR
, 0);
2592 /* set info field and valid bit for fixed descriptor */
2593 if (0x70 == (scp
->sense_buffer
[0] & 0x7f)) {
2594 scp
->sense_buffer
[0] |= 0x80; /* Valid bit */
2595 ret
= (lba
< OPT_MEDIUM_ERR_ADDR
)
2596 ? OPT_MEDIUM_ERR_ADDR
: (int)lba
;
2597 put_unaligned_be32(ret
, scp
->sense_buffer
+ 3);
2599 scsi_set_resid(scp
, scsi_bufflen(scp
));
2600 return check_condition_result
;
2603 read_lock_irqsave(&atomic_rw
, iflags
);
2606 if (unlikely(sdebug_dix
&& scsi_prot_sg_count(scp
))) {
2607 int prot_ret
= prot_verify_read(scp
, lba
, num
, ei_lba
);
2610 read_unlock_irqrestore(&atomic_rw
, iflags
);
2611 mk_sense_buffer(scp
, ABORTED_COMMAND
, 0x10, prot_ret
);
2612 return illegal_condition_result
;
2616 ret
= do_device_access(scp
, lba
, num
, false);
2617 read_unlock_irqrestore(&atomic_rw
, iflags
);
2618 if (unlikely(ret
== -1))
2619 return DID_ERROR
<< 16;
2621 scsi_in(scp
)->resid
= scsi_bufflen(scp
) - ret
;
2623 if (unlikely(sqcp
)) {
2624 if (sqcp
->inj_recovered
) {
2625 mk_sense_buffer(scp
, RECOVERED_ERROR
,
2626 THRESHOLD_EXCEEDED
, 0);
2627 return check_condition_result
;
2628 } else if (sqcp
->inj_transport
) {
2629 mk_sense_buffer(scp
, ABORTED_COMMAND
,
2630 TRANSPORT_PROBLEM
, ACK_NAK_TO
);
2631 return check_condition_result
;
2632 } else if (sqcp
->inj_dif
) {
2633 /* Logical block guard check failed */
2634 mk_sense_buffer(scp
, ABORTED_COMMAND
, 0x10, 1);
2635 return illegal_condition_result
;
2636 } else if (sqcp
->inj_dix
) {
2637 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, 0x10, 1);
2638 return illegal_condition_result
;
/* Log a hex/ASCII dump of @len bytes at @buf, 16 bytes per line, for
 * diagnosing protection check failures. */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0; i < len; i += 16) {
		char b[128];

		for (j = 0, n = 0; j < 16; j++) {
			unsigned char c = buf[i+j];

			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", buf[i+j]);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", buf[i+j]);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
2666 static int prot_verify_write(struct scsi_cmnd
*SCpnt
, sector_t start_sec
,
2667 unsigned int sectors
, u32 ei_lba
)
2670 struct sd_dif_tuple
*sdt
;
2672 sector_t sector
= start_sec
;
2675 struct sg_mapping_iter diter
;
2676 struct sg_mapping_iter piter
;
2678 BUG_ON(scsi_sg_count(SCpnt
) == 0);
2679 BUG_ON(scsi_prot_sg_count(SCpnt
) == 0);
2681 sg_miter_start(&piter
, scsi_prot_sglist(SCpnt
),
2682 scsi_prot_sg_count(SCpnt
),
2683 SG_MITER_ATOMIC
| SG_MITER_FROM_SG
);
2684 sg_miter_start(&diter
, scsi_sglist(SCpnt
), scsi_sg_count(SCpnt
),
2685 SG_MITER_ATOMIC
| SG_MITER_FROM_SG
);
2687 /* For each protection page */
2688 while (sg_miter_next(&piter
)) {
2690 if (WARN_ON(!sg_miter_next(&diter
))) {
2695 for (ppage_offset
= 0; ppage_offset
< piter
.length
;
2696 ppage_offset
+= sizeof(struct sd_dif_tuple
)) {
2697 /* If we're at the end of the current
2698 * data page advance to the next one
2700 if (dpage_offset
>= diter
.length
) {
2701 if (WARN_ON(!sg_miter_next(&diter
))) {
2708 sdt
= piter
.addr
+ ppage_offset
;
2709 daddr
= diter
.addr
+ dpage_offset
;
2711 ret
= dif_verify(sdt
, daddr
, sector
, ei_lba
);
2713 dump_sector(daddr
, sdebug_sector_size
);
2719 dpage_offset
+= sdebug_sector_size
;
2721 diter
.consumed
= dpage_offset
;
2722 sg_miter_stop(&diter
);
2724 sg_miter_stop(&piter
);
2726 dif_copy_prot(SCpnt
, start_sec
, sectors
, false);
2733 sg_miter_stop(&diter
);
2734 sg_miter_stop(&piter
);
2738 static unsigned long lba_to_map_index(sector_t lba
)
2740 if (sdebug_unmap_alignment
)
2741 lba
+= sdebug_unmap_granularity
- sdebug_unmap_alignment
;
2742 sector_div(lba
, sdebug_unmap_granularity
);
2746 static sector_t
map_index_to_lba(unsigned long index
)
2748 sector_t lba
= index
* sdebug_unmap_granularity
;
2750 if (sdebug_unmap_alignment
)
2751 lba
-= sdebug_unmap_granularity
- sdebug_unmap_alignment
;
2755 static unsigned int map_state(sector_t lba
, unsigned int *num
)
2758 unsigned int mapped
;
2759 unsigned long index
;
2762 index
= lba_to_map_index(lba
);
2763 mapped
= test_bit(index
, map_storep
);
2766 next
= find_next_zero_bit(map_storep
, map_size
, index
);
2768 next
= find_next_bit(map_storep
, map_size
, index
);
2770 end
= min_t(sector_t
, sdebug_store_sectors
, map_index_to_lba(next
));
2775 static void map_region(sector_t lba
, unsigned int len
)
2777 sector_t end
= lba
+ len
;
2780 unsigned long index
= lba_to_map_index(lba
);
2782 if (index
< map_size
)
2783 set_bit(index
, map_storep
);
2785 lba
= map_index_to_lba(index
+ 1);
2789 static void unmap_region(sector_t lba
, unsigned int len
)
2791 sector_t end
= lba
+ len
;
2794 unsigned long index
= lba_to_map_index(lba
);
2796 if (lba
== map_index_to_lba(index
) &&
2797 lba
+ sdebug_unmap_granularity
<= end
&&
2799 clear_bit(index
, map_storep
);
2800 if (sdebug_lbprz
) { /* for LBPRZ=2 return 0xff_s */
2801 memset(fake_storep
+
2802 lba
* sdebug_sector_size
,
2803 (sdebug_lbprz
& 1) ? 0 : 0xff,
2804 sdebug_sector_size
*
2805 sdebug_unmap_granularity
);
2808 memset(dif_storep
+ lba
, 0xff,
2809 sizeof(*dif_storep
) *
2810 sdebug_unmap_granularity
);
2813 lba
= map_index_to_lba(index
+ 1);
2817 static int resp_write_dt0(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
2819 u8
*cmd
= scp
->cmnd
;
2823 unsigned long iflags
;
2830 lba
= get_unaligned_be64(cmd
+ 2);
2831 num
= get_unaligned_be32(cmd
+ 10);
2836 lba
= get_unaligned_be32(cmd
+ 2);
2837 num
= get_unaligned_be16(cmd
+ 7);
2842 lba
= (u32
)cmd
[3] | (u32
)cmd
[2] << 8 |
2843 (u32
)(cmd
[1] & 0x1f) << 16;
2844 num
= (0 == cmd
[4]) ? 256 : cmd
[4];
2849 lba
= get_unaligned_be32(cmd
+ 2);
2850 num
= get_unaligned_be32(cmd
+ 6);
2853 case 0x53: /* XDWRITEREAD(10) */
2855 lba
= get_unaligned_be32(cmd
+ 2);
2856 num
= get_unaligned_be16(cmd
+ 7);
2859 default: /* assume WRITE(32) */
2860 lba
= get_unaligned_be64(cmd
+ 12);
2861 ei_lba
= get_unaligned_be32(cmd
+ 20);
2862 num
= get_unaligned_be32(cmd
+ 28);
2866 if (unlikely(have_dif_prot
&& check_prot
)) {
2867 if (sdebug_dif
== SD_DIF_TYPE2_PROTECTION
&&
2869 mk_sense_invalid_opcode(scp
);
2870 return check_condition_result
;
2872 if ((sdebug_dif
== SD_DIF_TYPE1_PROTECTION
||
2873 sdebug_dif
== SD_DIF_TYPE3_PROTECTION
) &&
2874 (cmd
[1] & 0xe0) == 0)
2875 sdev_printk(KERN_ERR
, scp
->device
, "Unprotected WR "
2879 /* inline check_device_access_params() */
2880 if (unlikely(lba
+ num
> sdebug_capacity
)) {
2881 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
2882 return check_condition_result
;
2884 /* transfer length excessive (tie in to block limits VPD page) */
2885 if (unlikely(num
> sdebug_store_sectors
)) {
2886 /* needs work to find which cdb byte 'num' comes from */
2887 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
2888 return check_condition_result
;
2891 write_lock_irqsave(&atomic_rw
, iflags
);
2894 if (unlikely(sdebug_dix
&& scsi_prot_sg_count(scp
))) {
2895 int prot_ret
= prot_verify_write(scp
, lba
, num
, ei_lba
);
2898 write_unlock_irqrestore(&atomic_rw
, iflags
);
2899 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, 0x10, prot_ret
);
2900 return illegal_condition_result
;
2904 ret
= do_device_access(scp
, lba
, num
, true);
2905 if (unlikely(scsi_debug_lbp()))
2906 map_region(lba
, num
);
2907 write_unlock_irqrestore(&atomic_rw
, iflags
);
2908 if (unlikely(-1 == ret
))
2909 return DID_ERROR
<< 16;
2910 else if (unlikely(sdebug_verbose
&&
2911 (ret
< (num
* sdebug_sector_size
))))
2912 sdev_printk(KERN_INFO
, scp
->device
,
2913 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2914 my_name
, num
* sdebug_sector_size
, ret
);
2916 if (unlikely(sdebug_any_injecting_opt
)) {
2917 struct sdebug_queued_cmd
*sqcp
=
2918 (struct sdebug_queued_cmd
*)scp
->host_scribble
;
2921 if (sqcp
->inj_recovered
) {
2922 mk_sense_buffer(scp
, RECOVERED_ERROR
,
2923 THRESHOLD_EXCEEDED
, 0);
2924 return check_condition_result
;
2925 } else if (sqcp
->inj_dif
) {
2926 /* Logical block guard check failed */
2927 mk_sense_buffer(scp
, ABORTED_COMMAND
, 0x10, 1);
2928 return illegal_condition_result
;
2929 } else if (sqcp
->inj_dix
) {
2930 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, 0x10, 1);
2931 return illegal_condition_result
;
2938 static int resp_write_same(struct scsi_cmnd
*scp
, u64 lba
, u32 num
,
2939 u32 ei_lba
, bool unmap
, bool ndob
)
2941 unsigned long iflags
;
2942 unsigned long long i
;
2946 ret
= check_device_access_params(scp
, lba
, num
);
2950 write_lock_irqsave(&atomic_rw
, iflags
);
2952 if (unmap
&& scsi_debug_lbp()) {
2953 unmap_region(lba
, num
);
2957 lba_off
= lba
* sdebug_sector_size
;
2958 /* if ndob then zero 1 logical block, else fetch 1 logical block */
2960 memset(fake_storep
+ lba_off
, 0, sdebug_sector_size
);
2963 ret
= fetch_to_dev_buffer(scp
, fake_storep
+ lba_off
,
2964 sdebug_sector_size
);
2967 write_unlock_irqrestore(&atomic_rw
, iflags
);
2968 return DID_ERROR
<< 16;
2969 } else if (sdebug_verbose
&& (ret
< (num
* sdebug_sector_size
)))
2970 sdev_printk(KERN_INFO
, scp
->device
,
2971 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2972 my_name
, "write same",
2973 num
* sdebug_sector_size
, ret
);
2975 /* Copy first sector to remaining blocks */
2976 for (i
= 1 ; i
< num
; i
++)
2977 memcpy(fake_storep
+ ((lba
+ i
) * sdebug_sector_size
),
2978 fake_storep
+ lba_off
,
2979 sdebug_sector_size
);
2981 if (scsi_debug_lbp())
2982 map_region(lba
, num
);
2984 write_unlock_irqrestore(&atomic_rw
, iflags
);
2989 static int resp_write_same_10(struct scsi_cmnd
*scp
,
2990 struct sdebug_dev_info
*devip
)
2992 u8
*cmd
= scp
->cmnd
;
2999 if (sdebug_lbpws10
== 0) {
3000 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 1, 3);
3001 return check_condition_result
;
3005 lba
= get_unaligned_be32(cmd
+ 2);
3006 num
= get_unaligned_be16(cmd
+ 7);
3007 if (num
> sdebug_write_same_length
) {
3008 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 7, -1);
3009 return check_condition_result
;
3011 return resp_write_same(scp
, lba
, num
, ei_lba
, unmap
, false);
3014 static int resp_write_same_16(struct scsi_cmnd
*scp
,
3015 struct sdebug_dev_info
*devip
)
3017 u8
*cmd
= scp
->cmnd
;
3024 if (cmd
[1] & 0x8) { /* UNMAP */
3025 if (sdebug_lbpws
== 0) {
3026 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 1, 3);
3027 return check_condition_result
;
3031 if (cmd
[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3033 lba
= get_unaligned_be64(cmd
+ 2);
3034 num
= get_unaligned_be32(cmd
+ 10);
3035 if (num
> sdebug_write_same_length
) {
3036 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 10, -1);
3037 return check_condition_result
;
3039 return resp_write_same(scp
, lba
, num
, ei_lba
, unmap
, ndob
);
3042 /* Note the mode field is in the same position as the (lower) service action
3043 * field. For the Report supported operation codes command, SPC-4 suggests
3044 * each mode of this command should be reported separately; for future. */
3045 static int resp_write_buffer(struct scsi_cmnd
*scp
,
3046 struct sdebug_dev_info
*devip
)
3048 u8
*cmd
= scp
->cmnd
;
3049 struct scsi_device
*sdp
= scp
->device
;
3050 struct sdebug_dev_info
*dp
;
3053 mode
= cmd
[1] & 0x1f;
3055 case 0x4: /* download microcode (MC) and activate (ACT) */
3056 /* set UAs on this device only */
3057 set_bit(SDEBUG_UA_BUS_RESET
, devip
->uas_bm
);
3058 set_bit(SDEBUG_UA_MICROCODE_CHANGED
, devip
->uas_bm
);
3060 case 0x5: /* download MC, save and ACT */
3061 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET
, devip
->uas_bm
);
3063 case 0x6: /* download MC with offsets and ACT */
3064 /* set UAs on most devices (LUs) in this target */
3065 list_for_each_entry(dp
,
3066 &devip
->sdbg_host
->dev_info_list
,
3068 if (dp
->target
== sdp
->id
) {
3069 set_bit(SDEBUG_UA_BUS_RESET
, dp
->uas_bm
);
3071 set_bit(SDEBUG_UA_MICROCODE_CHANGED
,
3075 case 0x7: /* download MC with offsets, save, and ACT */
3076 /* set UA on all devices (LUs) in this target */
3077 list_for_each_entry(dp
,
3078 &devip
->sdbg_host
->dev_info_list
,
3080 if (dp
->target
== sdp
->id
)
3081 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET
,
3085 /* do nothing for this command for other mode values */
3091 static int resp_comp_write(struct scsi_cmnd
*scp
,
3092 struct sdebug_dev_info
*devip
)
3094 u8
*cmd
= scp
->cmnd
;
3096 u8
*fake_storep_hold
;
3099 u32 lb_size
= sdebug_sector_size
;
3101 unsigned long iflags
;
3105 lba
= get_unaligned_be64(cmd
+ 2);
3106 num
= cmd
[13]; /* 1 to a maximum of 255 logical blocks */
3108 return 0; /* degenerate case, not an error */
3109 if (sdebug_dif
== SD_DIF_TYPE2_PROTECTION
&&
3111 mk_sense_invalid_opcode(scp
);
3112 return check_condition_result
;
3114 if ((sdebug_dif
== SD_DIF_TYPE1_PROTECTION
||
3115 sdebug_dif
== SD_DIF_TYPE3_PROTECTION
) &&
3116 (cmd
[1] & 0xe0) == 0)
3117 sdev_printk(KERN_ERR
, scp
->device
, "Unprotected WR "
3120 /* inline check_device_access_params() */
3121 if (lba
+ num
> sdebug_capacity
) {
3122 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
3123 return check_condition_result
;
3125 /* transfer length excessive (tie in to block limits VPD page) */
3126 if (num
> sdebug_store_sectors
) {
3127 /* needs work to find which cdb byte 'num' comes from */
3128 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
3129 return check_condition_result
;
3132 arr
= kzalloc(dnum
* lb_size
, GFP_ATOMIC
);
3134 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
3136 return check_condition_result
;
3139 write_lock_irqsave(&atomic_rw
, iflags
);
3141 /* trick do_device_access() to fetch both compare and write buffers
3142 * from data-in into arr. Safe (atomic) since write_lock held. */
3143 fake_storep_hold
= fake_storep
;
3145 ret
= do_device_access(scp
, 0, dnum
, true);
3146 fake_storep
= fake_storep_hold
;
3148 retval
= DID_ERROR
<< 16;
3150 } else if (sdebug_verbose
&& (ret
< (dnum
* lb_size
)))
3151 sdev_printk(KERN_INFO
, scp
->device
, "%s: compare_write: cdb "
3152 "indicated=%u, IO sent=%d bytes\n", my_name
,
3153 dnum
* lb_size
, ret
);
3154 if (!comp_write_worker(lba
, num
, arr
)) {
3155 mk_sense_buffer(scp
, MISCOMPARE
, MISCOMPARE_VERIFY_ASC
, 0);
3156 retval
= check_condition_result
;
3159 if (scsi_debug_lbp())
3160 map_region(lba
, num
);
3162 write_unlock_irqrestore(&atomic_rw
, iflags
);
3167 struct unmap_block_desc
{
3173 static int resp_unmap(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
3176 struct unmap_block_desc
*desc
;
3177 unsigned int i
, payload_len
, descriptors
;
3179 unsigned long iflags
;
3182 if (!scsi_debug_lbp())
3183 return 0; /* fib and say its done */
3184 payload_len
= get_unaligned_be16(scp
->cmnd
+ 7);
3185 BUG_ON(scsi_bufflen(scp
) != payload_len
);
3187 descriptors
= (payload_len
- 8) / 16;
3188 if (descriptors
> sdebug_unmap_max_desc
) {
3189 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 7, -1);
3190 return check_condition_result
;
3193 buf
= kzalloc(scsi_bufflen(scp
), GFP_ATOMIC
);
3195 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
3197 return check_condition_result
;
3200 scsi_sg_copy_to_buffer(scp
, buf
, scsi_bufflen(scp
));
3202 BUG_ON(get_unaligned_be16(&buf
[0]) != payload_len
- 2);
3203 BUG_ON(get_unaligned_be16(&buf
[2]) != descriptors
* 16);
3205 desc
= (void *)&buf
[8];
3207 write_lock_irqsave(&atomic_rw
, iflags
);
3209 for (i
= 0 ; i
< descriptors
; i
++) {
3210 unsigned long long lba
= get_unaligned_be64(&desc
[i
].lba
);
3211 unsigned int num
= get_unaligned_be32(&desc
[i
].blocks
);
3213 ret
= check_device_access_params(scp
, lba
, num
);
3217 unmap_region(lba
, num
);
3223 write_unlock_irqrestore(&atomic_rw
, iflags
);
3229 #define SDEBUG_GET_LBA_STATUS_LEN 32
3231 static int resp_get_lba_status(struct scsi_cmnd
*scp
,
3232 struct sdebug_dev_info
*devip
)
3234 u8
*cmd
= scp
->cmnd
;
3236 u32 alloc_len
, mapped
, num
;
3237 u8 arr
[SDEBUG_GET_LBA_STATUS_LEN
];
3240 lba
= get_unaligned_be64(cmd
+ 2);
3241 alloc_len
= get_unaligned_be32(cmd
+ 10);
3246 ret
= check_device_access_params(scp
, lba
, 1);
3250 if (scsi_debug_lbp())
3251 mapped
= map_state(lba
, &num
);
3254 /* following just in case virtual_gb changed */
3255 sdebug_capacity
= get_sdebug_capacity();
3256 if (sdebug_capacity
- lba
<= 0xffffffff)
3257 num
= sdebug_capacity
- lba
;
3262 memset(arr
, 0, SDEBUG_GET_LBA_STATUS_LEN
);
3263 put_unaligned_be32(20, arr
); /* Parameter Data Length */
3264 put_unaligned_be64(lba
, arr
+ 8); /* LBA */
3265 put_unaligned_be32(num
, arr
+ 16); /* Number of blocks */
3266 arr
[20] = !mapped
; /* prov_stat=0: mapped; 1: dealloc */
3268 return fill_from_dev_buffer(scp
, arr
, SDEBUG_GET_LBA_STATUS_LEN
);
3271 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3272 * (W-LUN), the normal Linux scanning logic does not associate it with a
3273 * device (e.g. /dev/sg7). The following magic will make that association:
3274 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3275 * where <n> is a host number. If there are multiple targets in a host then
3276 * the above will associate a W-LUN to each target. To only get a W-LUN
3277 * for target 2, then use "echo '- 2 49409' > scan" .
3279 static int resp_report_luns(struct scsi_cmnd
*scp
,
3280 struct sdebug_dev_info
*devip
)
3282 unsigned char *cmd
= scp
->cmnd
;
3283 unsigned int alloc_len
;
3284 unsigned char select_report
;
3286 struct scsi_lun
*lun_p
;
3288 unsigned int lun_cnt
; /* normal LUN count (max: 256) */
3289 unsigned int wlun_cnt
; /* report luns W-LUN count */
3290 unsigned int tlun_cnt
; /* total LUN count */
3291 unsigned int rlen
; /* response length (in bytes) */
3294 clear_luns_changed_on_target(devip
);
3296 select_report
= cmd
[2];
3297 alloc_len
= get_unaligned_be32(cmd
+ 6);
3299 if (alloc_len
< 4) {
3300 pr_err("alloc len too small %d\n", alloc_len
);
3301 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 6, -1);
3302 return check_condition_result
;
3305 switch (select_report
) {
3306 case 0: /* all LUNs apart from W-LUNs */
3307 lun_cnt
= sdebug_max_luns
;
3310 case 1: /* only W-LUNs */
3314 case 2: /* all LUNs */
3315 lun_cnt
= sdebug_max_luns
;
3318 case 0x10: /* only administrative LUs */
3319 case 0x11: /* see SPC-5 */
3320 case 0x12: /* only subsiduary LUs owned by referenced LU */
3322 pr_debug("select report invalid %d\n", select_report
);
3323 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, -1);
3324 return check_condition_result
;
3327 if (sdebug_no_lun_0
&& (lun_cnt
> 0))
3330 tlun_cnt
= lun_cnt
+ wlun_cnt
;
3332 rlen
= (tlun_cnt
* sizeof(struct scsi_lun
)) + 8;
3333 arr
= vmalloc(rlen
);
3335 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
3337 return check_condition_result
;
3339 memset(arr
, 0, rlen
);
3340 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
3341 select_report
, lun_cnt
, wlun_cnt
, sdebug_no_lun_0
);
3343 /* luns start at byte 8 in response following the header */
3344 lun_p
= (struct scsi_lun
*)&arr
[8];
3346 /* LUNs use single level peripheral device addressing method */
3347 lun
= sdebug_no_lun_0
? 1 : 0;
3348 for (i
= 0; i
< lun_cnt
; i
++)
3349 int_to_scsilun(lun
++, lun_p
++);
3352 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS
, lun_p
++);
3354 put_unaligned_be32(rlen
- 8, &arr
[0]);
3356 res
= fill_from_dev_buffer(scp
, arr
, rlen
);
3361 static int resp_xdwriteread(struct scsi_cmnd
*scp
, unsigned long long lba
,
3362 unsigned int num
, struct sdebug_dev_info
*devip
)
3365 unsigned char *kaddr
, *buf
;
3366 unsigned int offset
;
3367 struct scsi_data_buffer
*sdb
= scsi_in(scp
);
3368 struct sg_mapping_iter miter
;
3370 /* better not to use temporary buffer. */
3371 buf
= kzalloc(scsi_bufflen(scp
), GFP_ATOMIC
);
3373 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
3375 return check_condition_result
;
3378 scsi_sg_copy_to_buffer(scp
, buf
, scsi_bufflen(scp
));
3381 sg_miter_start(&miter
, sdb
->table
.sgl
, sdb
->table
.nents
,
3382 SG_MITER_ATOMIC
| SG_MITER_TO_SG
);
3384 while (sg_miter_next(&miter
)) {
3386 for (j
= 0; j
< miter
.length
; j
++)
3387 *(kaddr
+ j
) ^= *(buf
+ offset
+ j
);
3389 offset
+= miter
.length
;
3391 sg_miter_stop(&miter
);
3397 static int resp_xdwriteread_10(struct scsi_cmnd
*scp
,
3398 struct sdebug_dev_info
*devip
)
3400 u8
*cmd
= scp
->cmnd
;
3405 if (!scsi_bidi_cmnd(scp
)) {
3406 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
3408 return check_condition_result
;
3410 errsts
= resp_read_dt0(scp
, devip
);
3413 if (!(cmd
[1] & 0x4)) { /* DISABLE_WRITE is not set */
3414 errsts
= resp_write_dt0(scp
, devip
);
3418 lba
= get_unaligned_be32(cmd
+ 2);
3419 num
= get_unaligned_be16(cmd
+ 7);
3420 return resp_xdwriteread(scp
, lba
, num
, devip
);
3423 static struct sdebug_queue
*get_queue(struct scsi_cmnd
*cmnd
)
3425 struct sdebug_queue
*sqp
= sdebug_q_arr
;
3427 if (sdebug_mq_active
) {
3428 u32 tag
= blk_mq_unique_tag(cmnd
->request
);
3429 u16 hwq
= blk_mq_unique_tag_to_hwq(tag
);
3431 if (unlikely(hwq
>= submit_queues
)) {
3432 pr_warn("Unexpected hwq=%d, apply modulo\n", hwq
);
3433 hwq
%= submit_queues
;
3435 pr_debug("tag=%u, hwq=%d\n", tag
, hwq
);
3441 /* Queued (deferred) command completions converge here. */
3442 static void sdebug_q_cmd_complete(struct sdebug_defer
*sd_dp
)
3446 unsigned long iflags
;
3447 struct sdebug_queue
*sqp
;
3448 struct sdebug_queued_cmd
*sqcp
;
3449 struct scsi_cmnd
*scp
;
3450 struct sdebug_dev_info
*devip
;
3452 qc_idx
= sd_dp
->qc_idx
;
3453 sqp
= sdebug_q_arr
+ sd_dp
->sqa_idx
;
3454 if (sdebug_statistics
) {
3455 atomic_inc(&sdebug_completions
);
3456 if (raw_smp_processor_id() != sd_dp
->issuing_cpu
)
3457 atomic_inc(&sdebug_miss_cpus
);
3459 if (unlikely((qc_idx
< 0) || (qc_idx
>= SDEBUG_CANQUEUE
))) {
3460 pr_err("wild qc_idx=%d\n", qc_idx
);
3463 spin_lock_irqsave(&sqp
->qc_lock
, iflags
);
3464 sqcp
= &sqp
->qc_arr
[qc_idx
];
3466 if (unlikely(scp
== NULL
)) {
3467 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3468 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
3469 sd_dp
->sqa_idx
, qc_idx
);
3472 devip
= (struct sdebug_dev_info
*)scp
->device
->hostdata
;
3474 atomic_dec(&devip
->num_in_q
);
3476 pr_err("devip=NULL\n");
3477 if (unlikely(atomic_read(&retired_max_queue
) > 0))
3480 sqcp
->a_cmnd
= NULL
;
3481 if (unlikely(!test_and_clear_bit(qc_idx
, sqp
->in_use_bm
))) {
3482 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3483 pr_err("Unexpected completion\n");
3487 if (unlikely(retiring
)) { /* user has reduced max_queue */
3490 retval
= atomic_read(&retired_max_queue
);
3491 if (qc_idx
>= retval
) {
3492 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3493 pr_err("index %d too large\n", retval
);
3496 k
= find_last_bit(sqp
->in_use_bm
, retval
);
3497 if ((k
< sdebug_max_queue
) || (k
== retval
))
3498 atomic_set(&retired_max_queue
, 0);
3500 atomic_set(&retired_max_queue
, k
+ 1);
3502 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3503 scp
->scsi_done(scp
); /* callback to mid level */
3506 /* When high resolution timer goes off this function is called. */
3507 static enum hrtimer_restart
sdebug_q_cmd_hrt_complete(struct hrtimer
*timer
)
3509 struct sdebug_defer
*sd_dp
= container_of(timer
, struct sdebug_defer
,
3511 sdebug_q_cmd_complete(sd_dp
);
3512 return HRTIMER_NORESTART
;
3515 /* When work queue schedules work, it calls this function. */
3516 static void sdebug_q_cmd_wq_complete(struct work_struct
*work
)
3518 struct sdebug_defer
*sd_dp
= container_of(work
, struct sdebug_defer
,
3520 sdebug_q_cmd_complete(sd_dp
);
3523 static bool got_shared_uuid
;
3524 static uuid_be shared_uuid
;
3526 static struct sdebug_dev_info
*sdebug_device_create(
3527 struct sdebug_host_info
*sdbg_host
, gfp_t flags
)
3529 struct sdebug_dev_info
*devip
;
3531 devip
= kzalloc(sizeof(*devip
), flags
);
3533 if (sdebug_uuid_ctl
== 1)
3534 uuid_be_gen(&devip
->lu_name
);
3535 else if (sdebug_uuid_ctl
== 2) {
3536 if (got_shared_uuid
)
3537 devip
->lu_name
= shared_uuid
;
3539 uuid_be_gen(&shared_uuid
);
3540 got_shared_uuid
= true;
3541 devip
->lu_name
= shared_uuid
;
3544 devip
->sdbg_host
= sdbg_host
;
3545 list_add_tail(&devip
->dev_list
, &sdbg_host
->dev_info_list
);
3550 static struct sdebug_dev_info
*find_build_dev_info(struct scsi_device
*sdev
)
3552 struct sdebug_host_info
*sdbg_host
;
3553 struct sdebug_dev_info
*open_devip
= NULL
;
3554 struct sdebug_dev_info
*devip
;
3556 sdbg_host
= *(struct sdebug_host_info
**)shost_priv(sdev
->host
);
3558 pr_err("Host info NULL\n");
3561 list_for_each_entry(devip
, &sdbg_host
->dev_info_list
, dev_list
) {
3562 if ((devip
->used
) && (devip
->channel
== sdev
->channel
) &&
3563 (devip
->target
== sdev
->id
) &&
3564 (devip
->lun
== sdev
->lun
))
3567 if ((!devip
->used
) && (!open_devip
))
3571 if (!open_devip
) { /* try and make a new one */
3572 open_devip
= sdebug_device_create(sdbg_host
, GFP_ATOMIC
);
3574 pr_err("out of memory at line %d\n", __LINE__
);
3579 open_devip
->channel
= sdev
->channel
;
3580 open_devip
->target
= sdev
->id
;
3581 open_devip
->lun
= sdev
->lun
;
3582 open_devip
->sdbg_host
= sdbg_host
;
3583 atomic_set(&open_devip
->num_in_q
, 0);
3584 set_bit(SDEBUG_UA_POR
, open_devip
->uas_bm
);
3585 open_devip
->used
= true;
3589 static int scsi_debug_slave_alloc(struct scsi_device
*sdp
)
3592 pr_info("slave_alloc <%u %u %u %llu>\n",
3593 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
3594 queue_flag_set_unlocked(QUEUE_FLAG_BIDI
, sdp
->request_queue
);
3598 static int scsi_debug_slave_configure(struct scsi_device
*sdp
)
3600 struct sdebug_dev_info
*devip
=
3601 (struct sdebug_dev_info
*)sdp
->hostdata
;
3604 pr_info("slave_configure <%u %u %u %llu>\n",
3605 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
3606 if (sdp
->host
->max_cmd_len
!= SDEBUG_MAX_CMD_LEN
)
3607 sdp
->host
->max_cmd_len
= SDEBUG_MAX_CMD_LEN
;
3608 if (devip
== NULL
) {
3609 devip
= find_build_dev_info(sdp
);
3611 return 1; /* no resources, will be marked offline */
3613 sdp
->hostdata
= devip
;
3614 blk_queue_max_segment_size(sdp
->request_queue
, -1U);
3616 sdp
->no_uld_attach
= 1;
3620 static void scsi_debug_slave_destroy(struct scsi_device
*sdp
)
3622 struct sdebug_dev_info
*devip
=
3623 (struct sdebug_dev_info
*)sdp
->hostdata
;
3626 pr_info("slave_destroy <%u %u %u %llu>\n",
3627 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
3629 /* make this slot available for re-use */
3630 devip
->used
= false;
3631 sdp
->hostdata
= NULL
;
3635 static void stop_qc_helper(struct sdebug_defer
*sd_dp
)
3639 if ((sdebug_jdelay
> 0) || (sdebug_ndelay
> 0))
3640 hrtimer_cancel(&sd_dp
->hrt
);
3641 else if (sdebug_jdelay
< 0)
3642 cancel_work_sync(&sd_dp
->ew
.work
);
3645 /* If @cmnd found deletes its timer or work queue and returns true; else
3647 static bool stop_queued_cmnd(struct scsi_cmnd
*cmnd
)
3649 unsigned long iflags
;
3650 int j
, k
, qmax
, r_qmax
;
3651 struct sdebug_queue
*sqp
;
3652 struct sdebug_queued_cmd
*sqcp
;
3653 struct sdebug_dev_info
*devip
;
3654 struct sdebug_defer
*sd_dp
;
3656 for (j
= 0, sqp
= sdebug_q_arr
; j
< submit_queues
; ++j
, ++sqp
) {
3657 spin_lock_irqsave(&sqp
->qc_lock
, iflags
);
3658 qmax
= sdebug_max_queue
;
3659 r_qmax
= atomic_read(&retired_max_queue
);
3662 for (k
= 0; k
< qmax
; ++k
) {
3663 if (test_bit(k
, sqp
->in_use_bm
)) {
3664 sqcp
= &sqp
->qc_arr
[k
];
3665 if (cmnd
!= sqcp
->a_cmnd
)
3668 devip
= (struct sdebug_dev_info
*)
3669 cmnd
->device
->hostdata
;
3671 atomic_dec(&devip
->num_in_q
);
3672 sqcp
->a_cmnd
= NULL
;
3673 sd_dp
= sqcp
->sd_dp
;
3674 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3675 stop_qc_helper(sd_dp
);
3676 clear_bit(k
, sqp
->in_use_bm
);
3680 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3685 /* Deletes (stops) timers or work queues of all queued commands */
3686 static void stop_all_queued(void)
3688 unsigned long iflags
;
3690 struct sdebug_queue
*sqp
;
3691 struct sdebug_queued_cmd
*sqcp
;
3692 struct sdebug_dev_info
*devip
;
3693 struct sdebug_defer
*sd_dp
;
3695 for (j
= 0, sqp
= sdebug_q_arr
; j
< submit_queues
; ++j
, ++sqp
) {
3696 spin_lock_irqsave(&sqp
->qc_lock
, iflags
);
3697 for (k
= 0; k
< SDEBUG_CANQUEUE
; ++k
) {
3698 if (test_bit(k
, sqp
->in_use_bm
)) {
3699 sqcp
= &sqp
->qc_arr
[k
];
3700 if (sqcp
->a_cmnd
== NULL
)
3702 devip
= (struct sdebug_dev_info
*)
3703 sqcp
->a_cmnd
->device
->hostdata
;
3705 atomic_dec(&devip
->num_in_q
);
3706 sqcp
->a_cmnd
= NULL
;
3707 sd_dp
= sqcp
->sd_dp
;
3708 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3709 stop_qc_helper(sd_dp
);
3710 clear_bit(k
, sqp
->in_use_bm
);
3711 spin_lock_irqsave(&sqp
->qc_lock
, iflags
);
3714 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3718 /* Free queued command memory on heap */
3719 static void free_all_queued(void)
3722 struct sdebug_queue
*sqp
;
3723 struct sdebug_queued_cmd
*sqcp
;
3725 for (j
= 0, sqp
= sdebug_q_arr
; j
< submit_queues
; ++j
, ++sqp
) {
3726 for (k
= 0; k
< SDEBUG_CANQUEUE
; ++k
) {
3727 sqcp
= &sqp
->qc_arr
[k
];
3734 static int scsi_debug_abort(struct scsi_cmnd
*SCpnt
)
3740 ok
= stop_queued_cmnd(SCpnt
);
3741 if (SCpnt
->device
&& (SDEBUG_OPT_ALL_NOISE
& sdebug_opts
))
3742 sdev_printk(KERN_INFO
, SCpnt
->device
,
3743 "%s: command%s found\n", __func__
,
3749 static int scsi_debug_device_reset(struct scsi_cmnd
* SCpnt
)
3752 if (SCpnt
&& SCpnt
->device
) {
3753 struct scsi_device
*sdp
= SCpnt
->device
;
3754 struct sdebug_dev_info
*devip
=
3755 (struct sdebug_dev_info
*)sdp
->hostdata
;
3757 if (SDEBUG_OPT_ALL_NOISE
& sdebug_opts
)
3758 sdev_printk(KERN_INFO
, sdp
, "%s\n", __func__
);
3760 set_bit(SDEBUG_UA_POR
, devip
->uas_bm
);
3765 static int scsi_debug_target_reset(struct scsi_cmnd
*SCpnt
)
3767 struct sdebug_host_info
*sdbg_host
;
3768 struct sdebug_dev_info
*devip
;
3769 struct scsi_device
*sdp
;
3770 struct Scsi_Host
*hp
;
3773 ++num_target_resets
;
3776 sdp
= SCpnt
->device
;
3779 if (SDEBUG_OPT_ALL_NOISE
& sdebug_opts
)
3780 sdev_printk(KERN_INFO
, sdp
, "%s\n", __func__
);
3784 sdbg_host
= *(struct sdebug_host_info
**)shost_priv(hp
);
3786 list_for_each_entry(devip
,
3787 &sdbg_host
->dev_info_list
,
3789 if (devip
->target
== sdp
->id
) {
3790 set_bit(SDEBUG_UA_BUS_RESET
, devip
->uas_bm
);
3794 if (SDEBUG_OPT_RESET_NOISE
& sdebug_opts
)
3795 sdev_printk(KERN_INFO
, sdp
,
3796 "%s: %d device(s) found in target\n", __func__
, k
);
3801 static int scsi_debug_bus_reset(struct scsi_cmnd
* SCpnt
)
3803 struct sdebug_host_info
*sdbg_host
;
3804 struct sdebug_dev_info
*devip
;
3805 struct scsi_device
* sdp
;
3806 struct Scsi_Host
* hp
;
3810 if (!(SCpnt
&& SCpnt
->device
))
3812 sdp
= SCpnt
->device
;
3813 if (SDEBUG_OPT_ALL_NOISE
& sdebug_opts
)
3814 sdev_printk(KERN_INFO
, sdp
, "%s\n", __func__
);
3817 sdbg_host
= *(struct sdebug_host_info
**)shost_priv(hp
);
3819 list_for_each_entry(devip
,
3820 &sdbg_host
->dev_info_list
,
3822 set_bit(SDEBUG_UA_BUS_RESET
, devip
->uas_bm
);
3827 if (SDEBUG_OPT_RESET_NOISE
& sdebug_opts
)
3828 sdev_printk(KERN_INFO
, sdp
,
3829 "%s: %d device(s) found in host\n", __func__
, k
);
3834 static int scsi_debug_host_reset(struct scsi_cmnd
* SCpnt
)
3836 struct sdebug_host_info
* sdbg_host
;
3837 struct sdebug_dev_info
*devip
;
3841 if ((SCpnt
->device
) && (SDEBUG_OPT_ALL_NOISE
& sdebug_opts
))
3842 sdev_printk(KERN_INFO
, SCpnt
->device
, "%s\n", __func__
);
3843 spin_lock(&sdebug_host_list_lock
);
3844 list_for_each_entry(sdbg_host
, &sdebug_host_list
, host_list
) {
3845 list_for_each_entry(devip
, &sdbg_host
->dev_info_list
,
3847 set_bit(SDEBUG_UA_BUS_RESET
, devip
->uas_bm
);
3851 spin_unlock(&sdebug_host_list_lock
);
3853 if (SDEBUG_OPT_RESET_NOISE
& sdebug_opts
)
3854 sdev_printk(KERN_INFO
, SCpnt
->device
,
3855 "%s: %d device(s) found\n", __func__
, k
);
3859 static void __init
sdebug_build_parts(unsigned char *ramp
,
3860 unsigned long store_size
)
3862 struct partition
* pp
;
3863 int starts
[SDEBUG_MAX_PARTS
+ 2];
3864 int sectors_per_part
, num_sectors
, k
;
3865 int heads_by_sects
, start_sec
, end_sec
;
3867 /* assume partition table already zeroed */
3868 if ((sdebug_num_parts
< 1) || (store_size
< 1048576))
3870 if (sdebug_num_parts
> SDEBUG_MAX_PARTS
) {
3871 sdebug_num_parts
= SDEBUG_MAX_PARTS
;
3872 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS
);
3874 num_sectors
= (int)sdebug_store_sectors
;
3875 sectors_per_part
= (num_sectors
- sdebug_sectors_per
)
3877 heads_by_sects
= sdebug_heads
* sdebug_sectors_per
;
3878 starts
[0] = sdebug_sectors_per
;
3879 for (k
= 1; k
< sdebug_num_parts
; ++k
)
3880 starts
[k
] = ((k
* sectors_per_part
) / heads_by_sects
)
3882 starts
[sdebug_num_parts
] = num_sectors
;
3883 starts
[sdebug_num_parts
+ 1] = 0;
3885 ramp
[510] = 0x55; /* magic partition markings */
3887 pp
= (struct partition
*)(ramp
+ 0x1be);
3888 for (k
= 0; starts
[k
+ 1]; ++k
, ++pp
) {
3889 start_sec
= starts
[k
];
3890 end_sec
= starts
[k
+ 1] - 1;
3893 pp
->cyl
= start_sec
/ heads_by_sects
;
3894 pp
->head
= (start_sec
- (pp
->cyl
* heads_by_sects
))
3895 / sdebug_sectors_per
;
3896 pp
->sector
= (start_sec
% sdebug_sectors_per
) + 1;
3898 pp
->end_cyl
= end_sec
/ heads_by_sects
;
3899 pp
->end_head
= (end_sec
- (pp
->end_cyl
* heads_by_sects
))
3900 / sdebug_sectors_per
;
3901 pp
->end_sector
= (end_sec
% sdebug_sectors_per
) + 1;
3903 pp
->start_sect
= cpu_to_le32(start_sec
);
3904 pp
->nr_sects
= cpu_to_le32(end_sec
- start_sec
+ 1);
3905 pp
->sys_ind
= 0x83; /* plain Linux partition */
3909 static void block_unblock_all_queues(bool block
)
3912 struct sdebug_queue
*sqp
;
3914 for (j
= 0, sqp
= sdebug_q_arr
; j
< submit_queues
; ++j
, ++sqp
)
3915 atomic_set(&sqp
->blocked
, (int)block
);
3918 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
3919 * commands will be processed normally before triggers occur.
3921 static void tweak_cmnd_count(void)
3925 modulo
= abs(sdebug_every_nth
);
3928 block_unblock_all_queues(true);
3929 count
= atomic_read(&sdebug_cmnd_count
);
3930 atomic_set(&sdebug_cmnd_count
, (count
/ modulo
) * modulo
);
3931 block_unblock_all_queues(false);
3934 static void clear_queue_stats(void)
3936 atomic_set(&sdebug_cmnd_count
, 0);
3937 atomic_set(&sdebug_completions
, 0);
3938 atomic_set(&sdebug_miss_cpus
, 0);
3939 atomic_set(&sdebug_a_tsf
, 0);
3942 static void setup_inject(struct sdebug_queue
*sqp
,
3943 struct sdebug_queued_cmd
*sqcp
)
3945 if ((atomic_read(&sdebug_cmnd_count
) % abs(sdebug_every_nth
)) > 0)
3947 sqcp
->inj_recovered
= !!(SDEBUG_OPT_RECOVERED_ERR
& sdebug_opts
);
3948 sqcp
->inj_transport
= !!(SDEBUG_OPT_TRANSPORT_ERR
& sdebug_opts
);
3949 sqcp
->inj_dif
= !!(SDEBUG_OPT_DIF_ERR
& sdebug_opts
);
3950 sqcp
->inj_dix
= !!(SDEBUG_OPT_DIX_ERR
& sdebug_opts
);
3951 sqcp
->inj_short
= !!(SDEBUG_OPT_SHORT_TRANSFER
& sdebug_opts
);
3954 /* Complete the processing of the thread that queued a SCSI command to this
3955 * driver. It either completes the command by calling cmnd_done() or
3956 * schedules a hr timer or work queue then returns 0. Returns
3957 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
3959 static int schedule_resp(struct scsi_cmnd
*cmnd
, struct sdebug_dev_info
*devip
,
3960 int scsi_result
, int delta_jiff
)
3962 unsigned long iflags
;
3963 int k
, num_in_q
, qdepth
, inject
;
3964 struct sdebug_queue
*sqp
;
3965 struct sdebug_queued_cmd
*sqcp
;
3966 struct scsi_device
*sdp
;
3967 struct sdebug_defer
*sd_dp
;
3969 if (unlikely(devip
== NULL
)) {
3970 if (scsi_result
== 0)
3971 scsi_result
= DID_NO_CONNECT
<< 16;
3972 goto respond_in_thread
;
3976 if (unlikely(sdebug_verbose
&& scsi_result
))
3977 sdev_printk(KERN_INFO
, sdp
, "%s: non-zero result=0x%x\n",
3978 __func__
, scsi_result
);
3979 if (delta_jiff
== 0)
3980 goto respond_in_thread
;
3982 /* schedule the response at a later time if resources permit */
3983 sqp
= get_queue(cmnd
);
3984 spin_lock_irqsave(&sqp
->qc_lock
, iflags
);
3985 if (unlikely(atomic_read(&sqp
->blocked
))) {
3986 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3987 return SCSI_MLQUEUE_HOST_BUSY
;
3989 num_in_q
= atomic_read(&devip
->num_in_q
);
3990 qdepth
= cmnd
->device
->queue_depth
;
3992 if (unlikely((qdepth
> 0) && (num_in_q
>= qdepth
))) {
3994 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3995 goto respond_in_thread
;
3997 scsi_result
= device_qfull_result
;
3998 } else if (unlikely(sdebug_every_nth
&&
3999 (SDEBUG_OPT_RARE_TSF
& sdebug_opts
) &&
4000 (scsi_result
== 0))) {
4001 if ((num_in_q
== (qdepth
- 1)) &&
4002 (atomic_inc_return(&sdebug_a_tsf
) >=
4003 abs(sdebug_every_nth
))) {
4004 atomic_set(&sdebug_a_tsf
, 0);
4006 scsi_result
= device_qfull_result
;
4010 k
= find_first_zero_bit(sqp
->in_use_bm
, sdebug_max_queue
);
4011 if (unlikely(k
>= sdebug_max_queue
)) {
4012 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
4014 goto respond_in_thread
;
4015 else if (SDEBUG_OPT_ALL_TSF
& sdebug_opts
)
4016 scsi_result
= device_qfull_result
;
4017 if (SDEBUG_OPT_Q_NOISE
& sdebug_opts
)
4018 sdev_printk(KERN_INFO
, sdp
,
4019 "%s: max_queue=%d exceeded, %s\n",
4020 __func__
, sdebug_max_queue
,
4021 (scsi_result
? "status: TASK SET FULL" :
4022 "report: host busy"));
4024 goto respond_in_thread
;
4026 return SCSI_MLQUEUE_HOST_BUSY
;
4028 __set_bit(k
, sqp
->in_use_bm
);
4029 atomic_inc(&devip
->num_in_q
);
4030 sqcp
= &sqp
->qc_arr
[k
];
4031 sqcp
->a_cmnd
= cmnd
;
4032 cmnd
->host_scribble
= (unsigned char *)sqcp
;
4033 cmnd
->result
= scsi_result
;
4034 sd_dp
= sqcp
->sd_dp
;
4035 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
4036 if (unlikely(sdebug_every_nth
&& sdebug_any_injecting_opt
))
4037 setup_inject(sqp
, sqcp
);
4038 if (delta_jiff
> 0 || sdebug_ndelay
> 0) {
4041 if (delta_jiff
> 0) {
4044 jiffies_to_timespec(delta_jiff
, &ts
);
4045 kt
= ktime_set(ts
.tv_sec
, ts
.tv_nsec
);
4047 kt
= ktime_set(0, sdebug_ndelay
);
4048 if (NULL
== sd_dp
) {
4049 sd_dp
= kzalloc(sizeof(*sd_dp
), GFP_ATOMIC
);
4051 return SCSI_MLQUEUE_HOST_BUSY
;
4052 sqcp
->sd_dp
= sd_dp
;
4053 hrtimer_init(&sd_dp
->hrt
, CLOCK_MONOTONIC
,
4054 HRTIMER_MODE_REL_PINNED
);
4055 sd_dp
->hrt
.function
= sdebug_q_cmd_hrt_complete
;
4056 sd_dp
->sqa_idx
= sqp
- sdebug_q_arr
;
4059 if (sdebug_statistics
)
4060 sd_dp
->issuing_cpu
= raw_smp_processor_id();
4061 hrtimer_start(&sd_dp
->hrt
, kt
, HRTIMER_MODE_REL_PINNED
);
4062 } else { /* jdelay < 0, use work queue */
4063 if (NULL
== sd_dp
) {
4064 sd_dp
= kzalloc(sizeof(*sqcp
->sd_dp
), GFP_ATOMIC
);
4066 return SCSI_MLQUEUE_HOST_BUSY
;
4067 sqcp
->sd_dp
= sd_dp
;
4068 sd_dp
->sqa_idx
= sqp
- sdebug_q_arr
;
4070 INIT_WORK(&sd_dp
->ew
.work
, sdebug_q_cmd_wq_complete
);
4072 if (sdebug_statistics
)
4073 sd_dp
->issuing_cpu
= raw_smp_processor_id();
4074 schedule_work(&sd_dp
->ew
.work
);
4076 if (unlikely((SDEBUG_OPT_Q_NOISE
& sdebug_opts
) &&
4077 (scsi_result
== device_qfull_result
)))
4078 sdev_printk(KERN_INFO
, sdp
,
4079 "%s: num_in_q=%d +1, %s%s\n", __func__
,
4080 num_in_q
, (inject
? "<inject> " : ""),
4081 "status: TASK SET FULL");
4084 respond_in_thread
: /* call back to mid-layer using invocation thread */
4085 cmnd
->result
= scsi_result
;
4086 cmnd
->scsi_done(cmnd
);
4090 /* Note: The following macros create attribute files in the
4091 /sys/module/scsi_debug/parameters directory. Unfortunately this
4092 driver is unaware of a change and cannot trigger auxiliary actions
4093 as it can when the corresponding attribute in the
4094 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4096 module_param_named(add_host
, sdebug_add_host
, int, S_IRUGO
| S_IWUSR
);
4097 module_param_named(ato
, sdebug_ato
, int, S_IRUGO
);
4098 module_param_named(clustering
, sdebug_clustering
, bool, S_IRUGO
| S_IWUSR
);
4099 module_param_named(delay
, sdebug_jdelay
, int, S_IRUGO
| S_IWUSR
);
4100 module_param_named(dev_size_mb
, sdebug_dev_size_mb
, int, S_IRUGO
);
4101 module_param_named(dif
, sdebug_dif
, int, S_IRUGO
);
4102 module_param_named(dix
, sdebug_dix
, int, S_IRUGO
);
4103 module_param_named(dsense
, sdebug_dsense
, int, S_IRUGO
| S_IWUSR
);
4104 module_param_named(every_nth
, sdebug_every_nth
, int, S_IRUGO
| S_IWUSR
);
4105 module_param_named(fake_rw
, sdebug_fake_rw
, int, S_IRUGO
| S_IWUSR
);
4106 module_param_named(guard
, sdebug_guard
, uint
, S_IRUGO
);
4107 module_param_named(host_lock
, sdebug_host_lock
, bool, S_IRUGO
| S_IWUSR
);
4108 module_param_named(lbpu
, sdebug_lbpu
, int, S_IRUGO
);
4109 module_param_named(lbpws
, sdebug_lbpws
, int, S_IRUGO
);
4110 module_param_named(lbpws10
, sdebug_lbpws10
, int, S_IRUGO
);
4111 module_param_named(lbprz
, sdebug_lbprz
, int, S_IRUGO
);
4112 module_param_named(lowest_aligned
, sdebug_lowest_aligned
, int, S_IRUGO
);
4113 module_param_named(max_luns
, sdebug_max_luns
, int, S_IRUGO
| S_IWUSR
);
4114 module_param_named(max_queue
, sdebug_max_queue
, int, S_IRUGO
| S_IWUSR
);
4115 module_param_named(ndelay
, sdebug_ndelay
, int, S_IRUGO
| S_IWUSR
);
4116 module_param_named(no_lun_0
, sdebug_no_lun_0
, int, S_IRUGO
| S_IWUSR
);
4117 module_param_named(no_uld
, sdebug_no_uld
, int, S_IRUGO
);
4118 module_param_named(num_parts
, sdebug_num_parts
, int, S_IRUGO
);
4119 module_param_named(num_tgts
, sdebug_num_tgts
, int, S_IRUGO
| S_IWUSR
);
4120 module_param_named(opt_blks
, sdebug_opt_blks
, int, S_IRUGO
);
4121 module_param_named(opts
, sdebug_opts
, int, S_IRUGO
| S_IWUSR
);
4122 module_param_named(physblk_exp
, sdebug_physblk_exp
, int, S_IRUGO
);
4123 module_param_named(ptype
, sdebug_ptype
, int, S_IRUGO
| S_IWUSR
);
4124 module_param_named(removable
, sdebug_removable
, bool, S_IRUGO
| S_IWUSR
);
4125 module_param_named(scsi_level
, sdebug_scsi_level
, int, S_IRUGO
);
4126 module_param_named(sector_size
, sdebug_sector_size
, int, S_IRUGO
);
4127 module_param_named(statistics
, sdebug_statistics
, bool, S_IRUGO
| S_IWUSR
);
4128 module_param_named(strict
, sdebug_strict
, bool, S_IRUGO
| S_IWUSR
);
4129 module_param_named(submit_queues
, submit_queues
, int, S_IRUGO
);
4130 module_param_named(unmap_alignment
, sdebug_unmap_alignment
, int, S_IRUGO
);
4131 module_param_named(unmap_granularity
, sdebug_unmap_granularity
, int, S_IRUGO
);
4132 module_param_named(unmap_max_blocks
, sdebug_unmap_max_blocks
, int, S_IRUGO
);
4133 module_param_named(unmap_max_desc
, sdebug_unmap_max_desc
, int, S_IRUGO
);
4134 module_param_named(virtual_gb
, sdebug_virtual_gb
, int, S_IRUGO
| S_IWUSR
);
4135 module_param_named(uuid_ctl
, sdebug_uuid_ctl
, int, S_IRUGO
);
4136 module_param_named(vpd_use_hostno
, sdebug_vpd_use_hostno
, int,
4138 module_param_named(write_same_length
, sdebug_write_same_length
, int,
4141 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4142 MODULE_DESCRIPTION("SCSI debug adapter driver");
4143 MODULE_LICENSE("GPL");
4144 MODULE_VERSION(SDEBUG_VERSION
);
4146 MODULE_PARM_DESC(add_host
, "0..127 hosts allowed(def=1)");
4147 MODULE_PARM_DESC(ato
, "application tag ownership: 0=disk 1=host (def=1)");
4148 MODULE_PARM_DESC(clustering
, "when set enables larger transfers (def=0)");
4149 MODULE_PARM_DESC(delay
, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4150 MODULE_PARM_DESC(dev_size_mb
, "size in MiB of ram shared by devs(def=8)");
4151 MODULE_PARM_DESC(dif
, "data integrity field type: 0-3 (def=0)");
4152 MODULE_PARM_DESC(dix
, "data integrity extensions mask (def=0)");
4153 MODULE_PARM_DESC(dsense
, "use descriptor sense format(def=0 -> fixed)");
4154 MODULE_PARM_DESC(every_nth
, "timeout every nth command(def=0)");
4155 MODULE_PARM_DESC(fake_rw
, "fake reads/writes instead of copying (def=0)");
4156 MODULE_PARM_DESC(guard
, "protection checksum: 0=crc, 1=ip (def=0)");
4157 MODULE_PARM_DESC(host_lock
, "host_lock is ignored (def=0)");
4158 MODULE_PARM_DESC(lbpu
, "enable LBP, support UNMAP command (def=0)");
4159 MODULE_PARM_DESC(lbpws
, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4160 MODULE_PARM_DESC(lbpws10
, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4161 MODULE_PARM_DESC(lbprz
,
4162 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
4163 MODULE_PARM_DESC(lowest_aligned
, "lowest aligned lba (def=0)");
4164 MODULE_PARM_DESC(max_luns
, "number of LUNs per target to simulate(def=1)");
4165 MODULE_PARM_DESC(max_queue
, "max number of queued commands (1 to max(def))");
4166 MODULE_PARM_DESC(ndelay
, "response delay in nanoseconds (def=0 -> ignore)");
4167 MODULE_PARM_DESC(no_lun_0
, "no LU number 0 (def=0 -> have lun 0)");
4168 MODULE_PARM_DESC(no_uld
, "stop ULD (e.g. sd driver) attaching (def=0))");
4169 MODULE_PARM_DESC(num_parts
, "number of partitions(def=0)");
4170 MODULE_PARM_DESC(num_tgts
, "number of targets per host to simulate(def=1)");
4171 MODULE_PARM_DESC(opt_blks
, "optimal transfer length in blocks (def=1024)");
4172 MODULE_PARM_DESC(opts
, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4173 MODULE_PARM_DESC(physblk_exp
, "physical block exponent (def=0)");
4174 MODULE_PARM_DESC(ptype
, "SCSI peripheral type(def=0[disk])");
4175 MODULE_PARM_DESC(removable
, "claim to have removable media (def=0)");
4176 MODULE_PARM_DESC(scsi_level
, "SCSI level to simulate(def=7[SPC-5])");
4177 MODULE_PARM_DESC(sector_size
, "logical block size in bytes (def=512)");
4178 MODULE_PARM_DESC(statistics
, "collect statistics on commands, queues (def=0)");
4179 MODULE_PARM_DESC(strict
, "stricter checks: reserved field in cdb (def=0)");
4180 MODULE_PARM_DESC(submit_queues
, "support for block multi-queue (def=1)");
4181 MODULE_PARM_DESC(unmap_alignment
, "lowest aligned thin provisioning lba (def=0)");
4182 MODULE_PARM_DESC(unmap_granularity
, "thin provisioning granularity in blocks (def=1)");
4183 MODULE_PARM_DESC(unmap_max_blocks
, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4184 MODULE_PARM_DESC(unmap_max_desc
, "max # of ranges that can be unmapped in one cmd (def=256)");
4185 MODULE_PARM_DESC(uuid_ctl
,
4186 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
4187 MODULE_PARM_DESC(virtual_gb
, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4188 MODULE_PARM_DESC(vpd_use_hostno
, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4189 MODULE_PARM_DESC(write_same_length
, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4191 #define SDEBUG_INFO_LEN 256
4192 static char sdebug_info
[SDEBUG_INFO_LEN
];
4194 static const char * scsi_debug_info(struct Scsi_Host
* shp
)
4198 k
= scnprintf(sdebug_info
, SDEBUG_INFO_LEN
, "%s: version %s [%s]\n",
4199 my_name
, SDEBUG_VERSION
, sdebug_version_date
);
4200 if (k
>= (SDEBUG_INFO_LEN
- 1))
4202 scnprintf(sdebug_info
+ k
, SDEBUG_INFO_LEN
- k
,
4203 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4204 sdebug_dev_size_mb
, sdebug_opts
, submit_queues
,
4205 "statistics", (int)sdebug_statistics
);
4209 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4210 static int scsi_debug_write_info(struct Scsi_Host
*host
, char *buffer
,
4215 int minLen
= length
> 15 ? 15 : length
;
4217 if (!capable(CAP_SYS_ADMIN
) || !capable(CAP_SYS_RAWIO
))
4219 memcpy(arr
, buffer
, minLen
);
4221 if (1 != sscanf(arr
, "%d", &opts
))
4224 sdebug_verbose
= !!(SDEBUG_OPT_NOISE
& opts
);
4225 sdebug_any_injecting_opt
= !!(SDEBUG_OPT_ALL_INJECTING
& opts
);
4226 if (sdebug_every_nth
!= 0)
4231 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4232 * same for each scsi_debug host (if more than one). Some of the counters
4233 * output are not atomics so might be inaccurate in a busy system. */
4234 static int scsi_debug_show_info(struct seq_file
*m
, struct Scsi_Host
*host
)
4237 struct sdebug_queue
*sqp
;
4239 seq_printf(m
, "scsi_debug adapter driver, version %s [%s]\n",
4240 SDEBUG_VERSION
, sdebug_version_date
);
4241 seq_printf(m
, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
4242 sdebug_num_tgts
, "shared (ram) ", sdebug_dev_size_mb
,
4243 sdebug_opts
, sdebug_every_nth
);
4244 seq_printf(m
, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
4245 sdebug_jdelay
, sdebug_ndelay
, sdebug_max_luns
,
4246 sdebug_sector_size
, "bytes");
4247 seq_printf(m
, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
4248 sdebug_cylinders_per
, sdebug_heads
, sdebug_sectors_per
,
4250 seq_printf(m
, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
4251 num_dev_resets
, num_target_resets
, num_bus_resets
,
4253 seq_printf(m
, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
4254 dix_reads
, dix_writes
, dif_errors
);
4255 seq_printf(m
, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n",
4256 TICK_NSEC
/ 1000, "statistics", sdebug_statistics
,
4258 seq_printf(m
, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
4259 atomic_read(&sdebug_cmnd_count
),
4260 atomic_read(&sdebug_completions
),
4261 "miss_cpus", atomic_read(&sdebug_miss_cpus
),
4262 atomic_read(&sdebug_a_tsf
));
4264 seq_printf(m
, "submit_queues=%d\n", submit_queues
);
4265 for (j
= 0, sqp
= sdebug_q_arr
; j
< submit_queues
; ++j
, ++sqp
) {
4266 seq_printf(m
, " queue %d:\n", j
);
4267 f
= find_first_bit(sqp
->in_use_bm
, sdebug_max_queue
);
4268 if (f
!= sdebug_max_queue
) {
4269 l
= find_last_bit(sqp
->in_use_bm
, sdebug_max_queue
);
4270 seq_printf(m
, " in_use_bm BUSY: %s: %d,%d\n",
4271 "first,last bits", f
, l
);
4277 static ssize_t
delay_show(struct device_driver
*ddp
, char *buf
)
4279 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_jdelay
);
4281 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4282 * of delay is jiffies.
4284 static ssize_t
delay_store(struct device_driver
*ddp
, const char *buf
,
4289 if (count
> 0 && sscanf(buf
, "%d", &jdelay
) == 1) {
4291 if (sdebug_jdelay
!= jdelay
) {
4293 struct sdebug_queue
*sqp
;
4295 block_unblock_all_queues(true);
4296 for (j
= 0, sqp
= sdebug_q_arr
; j
< submit_queues
;
4298 k
= find_first_bit(sqp
->in_use_bm
,
4300 if (k
!= sdebug_max_queue
) {
4301 res
= -EBUSY
; /* queued commands */
4306 /* make sure sdebug_defer instances get
4307 * re-allocated for new delay variant */
4309 sdebug_jdelay
= jdelay
;
4312 block_unblock_all_queues(false);
4318 static DRIVER_ATTR_RW(delay
);
4320 static ssize_t
ndelay_show(struct device_driver
*ddp
, char *buf
)
4322 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_ndelay
);
4324 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4325 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4326 static ssize_t
ndelay_store(struct device_driver
*ddp
, const char *buf
,
4331 if ((count
> 0) && (1 == sscanf(buf
, "%d", &ndelay
)) &&
4332 (ndelay
>= 0) && (ndelay
< (1000 * 1000 * 1000))) {
4334 if (sdebug_ndelay
!= ndelay
) {
4336 struct sdebug_queue
*sqp
;
4338 block_unblock_all_queues(true);
4339 for (j
= 0, sqp
= sdebug_q_arr
; j
< submit_queues
;
4341 k
= find_first_bit(sqp
->in_use_bm
,
4343 if (k
!= sdebug_max_queue
) {
4344 res
= -EBUSY
; /* queued commands */
4349 /* make sure sdebug_defer instances get
4350 * re-allocated for new delay variant */
4352 sdebug_ndelay
= ndelay
;
4353 sdebug_jdelay
= ndelay
? JDELAY_OVERRIDDEN
4356 block_unblock_all_queues(false);
4362 static DRIVER_ATTR_RW(ndelay
);
4364 static ssize_t
opts_show(struct device_driver
*ddp
, char *buf
)
4366 return scnprintf(buf
, PAGE_SIZE
, "0x%x\n", sdebug_opts
);
4369 static ssize_t
opts_store(struct device_driver
*ddp
, const char *buf
,
4375 if (1 == sscanf(buf
, "%10s", work
)) {
4376 if (0 == strncasecmp(work
,"0x", 2)) {
4377 if (1 == sscanf(&work
[2], "%x", &opts
))
4380 if (1 == sscanf(work
, "%d", &opts
))
4387 sdebug_verbose
= !!(SDEBUG_OPT_NOISE
& opts
);
4388 sdebug_any_injecting_opt
= !!(SDEBUG_OPT_ALL_INJECTING
& opts
);
4392 static DRIVER_ATTR_RW(opts
);
4394 static ssize_t
ptype_show(struct device_driver
*ddp
, char *buf
)
4396 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_ptype
);
4398 static ssize_t
ptype_store(struct device_driver
*ddp
, const char *buf
,
4403 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4409 static DRIVER_ATTR_RW(ptype
);
4411 static ssize_t
dsense_show(struct device_driver
*ddp
, char *buf
)
4413 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_dsense
);
4415 static ssize_t
dsense_store(struct device_driver
*ddp
, const char *buf
,
4420 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4426 static DRIVER_ATTR_RW(dsense
);
4428 static ssize_t
fake_rw_show(struct device_driver
*ddp
, char *buf
)
4430 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_fake_rw
);
4432 static ssize_t
fake_rw_store(struct device_driver
*ddp
, const char *buf
,
4437 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4439 sdebug_fake_rw
= (sdebug_fake_rw
> 0);
4440 if (sdebug_fake_rw
!= n
) {
4441 if ((0 == n
) && (NULL
== fake_storep
)) {
4443 (unsigned long)sdebug_dev_size_mb
*
4446 fake_storep
= vmalloc(sz
);
4447 if (NULL
== fake_storep
) {
4448 pr_err("out of memory, 9\n");
4451 memset(fake_storep
, 0, sz
);
4459 static DRIVER_ATTR_RW(fake_rw
);
4461 static ssize_t
no_lun_0_show(struct device_driver
*ddp
, char *buf
)
4463 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_no_lun_0
);
4465 static ssize_t
no_lun_0_store(struct device_driver
*ddp
, const char *buf
,
4470 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4471 sdebug_no_lun_0
= n
;
4476 static DRIVER_ATTR_RW(no_lun_0
);
4478 static ssize_t
num_tgts_show(struct device_driver
*ddp
, char *buf
)
4480 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_num_tgts
);
4482 static ssize_t
num_tgts_store(struct device_driver
*ddp
, const char *buf
,
4487 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4488 sdebug_num_tgts
= n
;
4489 sdebug_max_tgts_luns();
4494 static DRIVER_ATTR_RW(num_tgts
);
4496 static ssize_t
dev_size_mb_show(struct device_driver
*ddp
, char *buf
)
4498 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_dev_size_mb
);
4500 static DRIVER_ATTR_RO(dev_size_mb
);
4502 static ssize_t
num_parts_show(struct device_driver
*ddp
, char *buf
)
4504 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_num_parts
);
4506 static DRIVER_ATTR_RO(num_parts
);
4508 static ssize_t
every_nth_show(struct device_driver
*ddp
, char *buf
)
4510 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_every_nth
);
4512 static ssize_t
every_nth_store(struct device_driver
*ddp
, const char *buf
,
4517 if ((count
> 0) && (1 == sscanf(buf
, "%d", &nth
))) {
4518 sdebug_every_nth
= nth
;
4519 if (nth
&& !sdebug_statistics
) {
4520 pr_info("every_nth needs statistics=1, set it\n");
4521 sdebug_statistics
= true;
4528 static DRIVER_ATTR_RW(every_nth
);
4530 static ssize_t
max_luns_show(struct device_driver
*ddp
, char *buf
)
4532 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_max_luns
);
4534 static ssize_t
max_luns_store(struct device_driver
*ddp
, const char *buf
,
4540 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4542 pr_warn("max_luns can be no more than 256\n");
4545 changed
= (sdebug_max_luns
!= n
);
4546 sdebug_max_luns
= n
;
4547 sdebug_max_tgts_luns();
4548 if (changed
&& (sdebug_scsi_level
>= 5)) { /* >= SPC-3 */
4549 struct sdebug_host_info
*sdhp
;
4550 struct sdebug_dev_info
*dp
;
4552 spin_lock(&sdebug_host_list_lock
);
4553 list_for_each_entry(sdhp
, &sdebug_host_list
,
4555 list_for_each_entry(dp
, &sdhp
->dev_info_list
,
4557 set_bit(SDEBUG_UA_LUNS_CHANGED
,
4561 spin_unlock(&sdebug_host_list_lock
);
4567 static DRIVER_ATTR_RW(max_luns
);
4569 static ssize_t
max_queue_show(struct device_driver
*ddp
, char *buf
)
4571 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_max_queue
);
4573 /* N.B. max_queue can be changed while there are queued commands. In flight
4574 * commands beyond the new max_queue will be completed. */
4575 static ssize_t
max_queue_store(struct device_driver
*ddp
, const char *buf
,
4579 struct sdebug_queue
*sqp
;
4581 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
> 0) &&
4582 (n
<= SDEBUG_CANQUEUE
)) {
4583 block_unblock_all_queues(true);
4585 for (j
= 0, sqp
= sdebug_q_arr
; j
< submit_queues
;
4587 a
= find_last_bit(sqp
->in_use_bm
, SDEBUG_CANQUEUE
);
4591 sdebug_max_queue
= n
;
4592 if (k
== SDEBUG_CANQUEUE
)
4593 atomic_set(&retired_max_queue
, 0);
4595 atomic_set(&retired_max_queue
, k
+ 1);
4597 atomic_set(&retired_max_queue
, 0);
4598 block_unblock_all_queues(false);
4603 static DRIVER_ATTR_RW(max_queue
);
4605 static ssize_t
no_uld_show(struct device_driver
*ddp
, char *buf
)
4607 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_no_uld
);
4609 static DRIVER_ATTR_RO(no_uld
);
4611 static ssize_t
scsi_level_show(struct device_driver
*ddp
, char *buf
)
4613 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_scsi_level
);
4615 static DRIVER_ATTR_RO(scsi_level
);
4617 static ssize_t
virtual_gb_show(struct device_driver
*ddp
, char *buf
)
4619 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_virtual_gb
);
4621 static ssize_t
virtual_gb_store(struct device_driver
*ddp
, const char *buf
,
4627 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4628 changed
= (sdebug_virtual_gb
!= n
);
4629 sdebug_virtual_gb
= n
;
4630 sdebug_capacity
= get_sdebug_capacity();
4632 struct sdebug_host_info
*sdhp
;
4633 struct sdebug_dev_info
*dp
;
4635 spin_lock(&sdebug_host_list_lock
);
4636 list_for_each_entry(sdhp
, &sdebug_host_list
,
4638 list_for_each_entry(dp
, &sdhp
->dev_info_list
,
4640 set_bit(SDEBUG_UA_CAPACITY_CHANGED
,
4644 spin_unlock(&sdebug_host_list_lock
);
4650 static DRIVER_ATTR_RW(virtual_gb
);
4652 static ssize_t
add_host_show(struct device_driver
*ddp
, char *buf
)
4654 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_add_host
);
4657 static int sdebug_add_adapter(void);
4658 static void sdebug_remove_adapter(void);
4660 static ssize_t
add_host_store(struct device_driver
*ddp
, const char *buf
,
4665 if (sscanf(buf
, "%d", &delta_hosts
) != 1)
4667 if (delta_hosts
> 0) {
4669 sdebug_add_adapter();
4670 } while (--delta_hosts
);
4671 } else if (delta_hosts
< 0) {
4673 sdebug_remove_adapter();
4674 } while (++delta_hosts
);
4678 static DRIVER_ATTR_RW(add_host
);
4680 static ssize_t
vpd_use_hostno_show(struct device_driver
*ddp
, char *buf
)
4682 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_vpd_use_hostno
);
4684 static ssize_t
vpd_use_hostno_store(struct device_driver
*ddp
, const char *buf
,
4689 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4690 sdebug_vpd_use_hostno
= n
;
4695 static DRIVER_ATTR_RW(vpd_use_hostno
);
4697 static ssize_t
statistics_show(struct device_driver
*ddp
, char *buf
)
4699 return scnprintf(buf
, PAGE_SIZE
, "%d\n", (int)sdebug_statistics
);
4701 static ssize_t
statistics_store(struct device_driver
*ddp
, const char *buf
,
4706 if ((count
> 0) && (sscanf(buf
, "%d", &n
) == 1) && (n
>= 0)) {
4708 sdebug_statistics
= true;
4710 clear_queue_stats();
4711 sdebug_statistics
= false;
4717 static DRIVER_ATTR_RW(statistics
);
4719 static ssize_t
sector_size_show(struct device_driver
*ddp
, char *buf
)
4721 return scnprintf(buf
, PAGE_SIZE
, "%u\n", sdebug_sector_size
);
4723 static DRIVER_ATTR_RO(sector_size
);
4725 static ssize_t
submit_queues_show(struct device_driver
*ddp
, char *buf
)
4727 return scnprintf(buf
, PAGE_SIZE
, "%d\n", submit_queues
);
4729 static DRIVER_ATTR_RO(submit_queues
);
4731 static ssize_t
dix_show(struct device_driver
*ddp
, char *buf
)
4733 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_dix
);
4735 static DRIVER_ATTR_RO(dix
);
4737 static ssize_t
dif_show(struct device_driver
*ddp
, char *buf
)
4739 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_dif
);
4741 static DRIVER_ATTR_RO(dif
);
4743 static ssize_t
guard_show(struct device_driver
*ddp
, char *buf
)
4745 return scnprintf(buf
, PAGE_SIZE
, "%u\n", sdebug_guard
);
4747 static DRIVER_ATTR_RO(guard
);
4749 static ssize_t
ato_show(struct device_driver
*ddp
, char *buf
)
4751 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_ato
);
4753 static DRIVER_ATTR_RO(ato
);
4755 static ssize_t
map_show(struct device_driver
*ddp
, char *buf
)
4759 if (!scsi_debug_lbp())
4760 return scnprintf(buf
, PAGE_SIZE
, "0-%u\n",
4761 sdebug_store_sectors
);
4763 count
= scnprintf(buf
, PAGE_SIZE
- 1, "%*pbl",
4764 (int)map_size
, map_storep
);
4765 buf
[count
++] = '\n';
4770 static DRIVER_ATTR_RO(map
);
4772 static ssize_t
removable_show(struct device_driver
*ddp
, char *buf
)
4774 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_removable
? 1 : 0);
4776 static ssize_t
removable_store(struct device_driver
*ddp
, const char *buf
,
4781 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4782 sdebug_removable
= (n
> 0);
4787 static DRIVER_ATTR_RW(removable
);
4789 static ssize_t
host_lock_show(struct device_driver
*ddp
, char *buf
)
4791 return scnprintf(buf
, PAGE_SIZE
, "%d\n", !!sdebug_host_lock
);
4793 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
4794 static ssize_t
host_lock_store(struct device_driver
*ddp
, const char *buf
,
4799 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4800 sdebug_host_lock
= (n
> 0);
4805 static DRIVER_ATTR_RW(host_lock
);
4807 static ssize_t
strict_show(struct device_driver
*ddp
, char *buf
)
4809 return scnprintf(buf
, PAGE_SIZE
, "%d\n", !!sdebug_strict
);
4811 static ssize_t
strict_store(struct device_driver
*ddp
, const char *buf
,
4816 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4817 sdebug_strict
= (n
> 0);
4822 static DRIVER_ATTR_RW(strict
);
4824 static ssize_t
uuid_ctl_show(struct device_driver
*ddp
, char *buf
)
4826 return scnprintf(buf
, PAGE_SIZE
, "%d\n", !!sdebug_uuid_ctl
);
4828 static DRIVER_ATTR_RO(uuid_ctl
);
4831 /* Note: The following array creates attribute files in the
4832 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4833 files (over those found in the /sys/module/scsi_debug/parameters
4834 directory) is that auxiliary actions can be triggered when an attribute
4835 is changed. For example see: sdebug_add_host_store() above.
4838 static struct attribute
*sdebug_drv_attrs
[] = {
4839 &driver_attr_delay
.attr
,
4840 &driver_attr_opts
.attr
,
4841 &driver_attr_ptype
.attr
,
4842 &driver_attr_dsense
.attr
,
4843 &driver_attr_fake_rw
.attr
,
4844 &driver_attr_no_lun_0
.attr
,
4845 &driver_attr_num_tgts
.attr
,
4846 &driver_attr_dev_size_mb
.attr
,
4847 &driver_attr_num_parts
.attr
,
4848 &driver_attr_every_nth
.attr
,
4849 &driver_attr_max_luns
.attr
,
4850 &driver_attr_max_queue
.attr
,
4851 &driver_attr_no_uld
.attr
,
4852 &driver_attr_scsi_level
.attr
,
4853 &driver_attr_virtual_gb
.attr
,
4854 &driver_attr_add_host
.attr
,
4855 &driver_attr_vpd_use_hostno
.attr
,
4856 &driver_attr_sector_size
.attr
,
4857 &driver_attr_statistics
.attr
,
4858 &driver_attr_submit_queues
.attr
,
4859 &driver_attr_dix
.attr
,
4860 &driver_attr_dif
.attr
,
4861 &driver_attr_guard
.attr
,
4862 &driver_attr_ato
.attr
,
4863 &driver_attr_map
.attr
,
4864 &driver_attr_removable
.attr
,
4865 &driver_attr_host_lock
.attr
,
4866 &driver_attr_ndelay
.attr
,
4867 &driver_attr_strict
.attr
,
4868 &driver_attr_uuid_ctl
.attr
,
4871 ATTRIBUTE_GROUPS(sdebug_drv
);
4873 static struct device
*pseudo_primary
;
4875 static int __init
scsi_debug_init(void)
4882 atomic_set(&retired_max_queue
, 0);
4884 if (sdebug_ndelay
>= 1000 * 1000 * 1000) {
4885 pr_warn("ndelay must be less than 1 second, ignored\n");
4887 } else if (sdebug_ndelay
> 0)
4888 sdebug_jdelay
= JDELAY_OVERRIDDEN
;
4890 switch (sdebug_sector_size
) {
4897 pr_err("invalid sector_size %d\n", sdebug_sector_size
);
4901 switch (sdebug_dif
) {
4903 case SD_DIF_TYPE0_PROTECTION
:
4905 case SD_DIF_TYPE1_PROTECTION
:
4906 case SD_DIF_TYPE2_PROTECTION
:
4907 case SD_DIF_TYPE3_PROTECTION
:
4908 have_dif_prot
= true;
4912 pr_err("dif must be 0, 1, 2 or 3\n");
4916 if (sdebug_guard
> 1) {
4917 pr_err("guard must be 0 or 1\n");
4921 if (sdebug_ato
> 1) {
4922 pr_err("ato must be 0 or 1\n");
4926 if (sdebug_physblk_exp
> 15) {
4927 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp
);
4930 if (sdebug_max_luns
> 256) {
4931 pr_warn("max_luns can be no more than 256, use default\n");
4932 sdebug_max_luns
= DEF_MAX_LUNS
;
4935 if (sdebug_lowest_aligned
> 0x3fff) {
4936 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned
);
4940 if (submit_queues
< 1) {
4941 pr_err("submit_queues must be 1 or more\n");
4944 sdebug_q_arr
= kcalloc(submit_queues
, sizeof(struct sdebug_queue
),
4946 if (sdebug_q_arr
== NULL
)
4948 for (k
= 0; k
< submit_queues
; ++k
)
4949 spin_lock_init(&sdebug_q_arr
[k
].qc_lock
);
4951 if (sdebug_dev_size_mb
< 1)
4952 sdebug_dev_size_mb
= 1; /* force minimum 1 MB ramdisk */
4953 sz
= (unsigned long)sdebug_dev_size_mb
* 1048576;
4954 sdebug_store_sectors
= sz
/ sdebug_sector_size
;
4955 sdebug_capacity
= get_sdebug_capacity();
4957 /* play around with geometry, don't waste too much on track 0 */
4959 sdebug_sectors_per
= 32;
4960 if (sdebug_dev_size_mb
>= 256)
4962 else if (sdebug_dev_size_mb
>= 16)
4964 sdebug_cylinders_per
= (unsigned long)sdebug_capacity
/
4965 (sdebug_sectors_per
* sdebug_heads
);
4966 if (sdebug_cylinders_per
>= 1024) {
4967 /* other LLDs do this; implies >= 1GB ram disk ... */
4969 sdebug_sectors_per
= 63;
4970 sdebug_cylinders_per
= (unsigned long)sdebug_capacity
/
4971 (sdebug_sectors_per
* sdebug_heads
);
4974 if (sdebug_fake_rw
== 0) {
4975 fake_storep
= vmalloc(sz
);
4976 if (NULL
== fake_storep
) {
4977 pr_err("out of memory, 1\n");
4981 memset(fake_storep
, 0, sz
);
4982 if (sdebug_num_parts
> 0)
4983 sdebug_build_parts(fake_storep
, sz
);
4989 dif_size
= sdebug_store_sectors
* sizeof(struct sd_dif_tuple
);
4990 dif_storep
= vmalloc(dif_size
);
4992 pr_err("dif_storep %u bytes @ %p\n", dif_size
, dif_storep
);
4994 if (dif_storep
== NULL
) {
4995 pr_err("out of mem. (DIX)\n");
5000 memset(dif_storep
, 0xff, dif_size
);
5003 /* Logical Block Provisioning */
5004 if (scsi_debug_lbp()) {
5005 sdebug_unmap_max_blocks
=
5006 clamp(sdebug_unmap_max_blocks
, 0U, 0xffffffffU
);
5008 sdebug_unmap_max_desc
=
5009 clamp(sdebug_unmap_max_desc
, 0U, 256U);
5011 sdebug_unmap_granularity
=
5012 clamp(sdebug_unmap_granularity
, 1U, 0xffffffffU
);
5014 if (sdebug_unmap_alignment
&&
5015 sdebug_unmap_granularity
<=
5016 sdebug_unmap_alignment
) {
5017 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
5022 map_size
= lba_to_map_index(sdebug_store_sectors
- 1) + 1;
5023 map_storep
= vmalloc(BITS_TO_LONGS(map_size
) * sizeof(long));
5025 pr_info("%lu provisioning blocks\n", map_size
);
5027 if (map_storep
== NULL
) {
5028 pr_err("out of mem. (MAP)\n");
5033 bitmap_zero(map_storep
, map_size
);
5035 /* Map first 1KB for partition table */
5036 if (sdebug_num_parts
)
5040 pseudo_primary
= root_device_register("pseudo_0");
5041 if (IS_ERR(pseudo_primary
)) {
5042 pr_warn("root_device_register() error\n");
5043 ret
= PTR_ERR(pseudo_primary
);
5046 ret
= bus_register(&pseudo_lld_bus
);
5048 pr_warn("bus_register error: %d\n", ret
);
5051 ret
= driver_register(&sdebug_driverfs_driver
);
5053 pr_warn("driver_register error: %d\n", ret
);
5057 host_to_add
= sdebug_add_host
;
5058 sdebug_add_host
= 0;
5060 for (k
= 0; k
< host_to_add
; k
++) {
5061 if (sdebug_add_adapter()) {
5062 pr_err("sdebug_add_adapter failed k=%d\n", k
);
5068 pr_info("built %d host(s)\n", sdebug_add_host
);
5073 bus_unregister(&pseudo_lld_bus
);
5075 root_device_unregister(pseudo_primary
);
5081 kfree(sdebug_q_arr
);
5085 static void __exit
scsi_debug_exit(void)
5087 int k
= sdebug_add_host
;
5092 sdebug_remove_adapter();
5093 driver_unregister(&sdebug_driverfs_driver
);
5094 bus_unregister(&pseudo_lld_bus
);
5095 root_device_unregister(pseudo_primary
);
5099 kfree(sdebug_q_arr
);
5102 device_initcall(scsi_debug_init
);
5103 module_exit(scsi_debug_exit
);
5105 static void sdebug_release_adapter(struct device
* dev
)
5107 struct sdebug_host_info
*sdbg_host
;
5109 sdbg_host
= to_sdebug_host(dev
);
5113 static int sdebug_add_adapter(void)
5115 int k
, devs_per_host
;
5117 struct sdebug_host_info
*sdbg_host
;
5118 struct sdebug_dev_info
*sdbg_devinfo
, *tmp
;
5120 sdbg_host
= kzalloc(sizeof(*sdbg_host
),GFP_KERNEL
);
5121 if (NULL
== sdbg_host
) {
5122 pr_err("out of memory at line %d\n", __LINE__
);
5126 INIT_LIST_HEAD(&sdbg_host
->dev_info_list
);
5128 devs_per_host
= sdebug_num_tgts
* sdebug_max_luns
;
5129 for (k
= 0; k
< devs_per_host
; k
++) {
5130 sdbg_devinfo
= sdebug_device_create(sdbg_host
, GFP_KERNEL
);
5131 if (!sdbg_devinfo
) {
5132 pr_err("out of memory at line %d\n", __LINE__
);
5138 spin_lock(&sdebug_host_list_lock
);
5139 list_add_tail(&sdbg_host
->host_list
, &sdebug_host_list
);
5140 spin_unlock(&sdebug_host_list_lock
);
5142 sdbg_host
->dev
.bus
= &pseudo_lld_bus
;
5143 sdbg_host
->dev
.parent
= pseudo_primary
;
5144 sdbg_host
->dev
.release
= &sdebug_release_adapter
;
5145 dev_set_name(&sdbg_host
->dev
, "adapter%d", sdebug_add_host
);
5147 error
= device_register(&sdbg_host
->dev
);
5156 list_for_each_entry_safe(sdbg_devinfo
, tmp
, &sdbg_host
->dev_info_list
,
5158 list_del(&sdbg_devinfo
->dev_list
);
5159 kfree(sdbg_devinfo
);
5166 static void sdebug_remove_adapter(void)
5168 struct sdebug_host_info
* sdbg_host
= NULL
;
5170 spin_lock(&sdebug_host_list_lock
);
5171 if (!list_empty(&sdebug_host_list
)) {
5172 sdbg_host
= list_entry(sdebug_host_list
.prev
,
5173 struct sdebug_host_info
, host_list
);
5174 list_del(&sdbg_host
->host_list
);
5176 spin_unlock(&sdebug_host_list_lock
);
5181 device_unregister(&sdbg_host
->dev
);
5185 static int sdebug_change_qdepth(struct scsi_device
*sdev
, int qdepth
)
5188 struct sdebug_dev_info
*devip
;
5190 block_unblock_all_queues(true);
5191 devip
= (struct sdebug_dev_info
*)sdev
->hostdata
;
5192 if (NULL
== devip
) {
5193 block_unblock_all_queues(false);
5196 num_in_q
= atomic_read(&devip
->num_in_q
);
5200 /* allow to exceed max host qc_arr elements for testing */
5201 if (qdepth
> SDEBUG_CANQUEUE
+ 10)
5202 qdepth
= SDEBUG_CANQUEUE
+ 10;
5203 scsi_change_queue_depth(sdev
, qdepth
);
5205 if (SDEBUG_OPT_Q_NOISE
& sdebug_opts
) {
5206 sdev_printk(KERN_INFO
, sdev
, "%s: qdepth=%d, num_in_q=%d\n",
5207 __func__
, qdepth
, num_in_q
);
5209 block_unblock_all_queues(false);
5210 return sdev
->queue_depth
;
5213 static bool fake_timeout(struct scsi_cmnd
*scp
)
5215 if (0 == (atomic_read(&sdebug_cmnd_count
) % abs(sdebug_every_nth
))) {
5216 if (sdebug_every_nth
< -1)
5217 sdebug_every_nth
= -1;
5218 if (SDEBUG_OPT_TIMEOUT
& sdebug_opts
)
5219 return true; /* ignore command causing timeout */
5220 else if (SDEBUG_OPT_MAC_TIMEOUT
& sdebug_opts
&&
5221 scsi_medium_access_command(scp
))
5222 return true; /* time out reads and writes */
5227 static int scsi_debug_queuecommand(struct Scsi_Host
*shost
,
5228 struct scsi_cmnd
*scp
)
5231 struct scsi_device
*sdp
= scp
->device
;
5232 const struct opcode_info_t
*oip
;
5233 const struct opcode_info_t
*r_oip
;
5234 struct sdebug_dev_info
*devip
;
5235 u8
*cmd
= scp
->cmnd
;
5236 int (*r_pfp
)(struct scsi_cmnd
*, struct sdebug_dev_info
*);
5244 scsi_set_resid(scp
, 0);
5245 if (sdebug_statistics
)
5246 atomic_inc(&sdebug_cmnd_count
);
5247 if (unlikely(sdebug_verbose
&&
5248 !(SDEBUG_OPT_NO_CDB_NOISE
& sdebug_opts
))) {
5253 sb
= (int)sizeof(b
);
5255 strcpy(b
, "too long, over 32 bytes");
5257 for (k
= 0, n
= 0; k
< len
&& n
< sb
; ++k
)
5258 n
+= scnprintf(b
+ n
, sb
- n
, "%02x ",
5261 if (sdebug_mq_active
)
5262 sdev_printk(KERN_INFO
, sdp
, "%s: tag=%u, cmd %s\n",
5263 my_name
, blk_mq_unique_tag(scp
->request
),
5266 sdev_printk(KERN_INFO
, sdp
, "%s: cmd %s\n", my_name
,
5269 has_wlun_rl
= (sdp
->lun
== SCSI_W_LUN_REPORT_LUNS
);
5270 if (unlikely((sdp
->lun
>= sdebug_max_luns
) && !has_wlun_rl
))
5273 sdeb_i
= opcode_ind_arr
[opcode
]; /* fully mapped */
5274 oip
= &opcode_info_arr
[sdeb_i
]; /* safe if table consistent */
5275 devip
= (struct sdebug_dev_info
*)sdp
->hostdata
;
5276 if (unlikely(!devip
)) {
5277 devip
= find_build_dev_info(sdp
);
5281 na
= oip
->num_attached
;
5283 if (na
) { /* multiple commands with this opcode */
5285 if (FF_SA
& r_oip
->flags
) {
5286 if (F_SA_LOW
& oip
->flags
)
5289 sa
= get_unaligned_be16(cmd
+ 8);
5290 for (k
= 0; k
<= na
; oip
= r_oip
->arrp
+ k
++) {
5291 if (opcode
== oip
->opcode
&& sa
== oip
->sa
)
5294 } else { /* since no service action only check opcode */
5295 for (k
= 0; k
<= na
; oip
= r_oip
->arrp
+ k
++) {
5296 if (opcode
== oip
->opcode
)
5301 if (F_SA_LOW
& r_oip
->flags
)
5302 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 1, 4);
5303 else if (F_SA_HIGH
& r_oip
->flags
)
5304 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 8, 7);
5306 mk_sense_invalid_opcode(scp
);
5309 } /* else (when na==0) we assume the oip is a match */
5311 if (unlikely(F_INV_OP
& flags
)) {
5312 mk_sense_invalid_opcode(scp
);
5315 if (unlikely(has_wlun_rl
&& !(F_RL_WLUN_OK
& flags
))) {
5317 sdev_printk(KERN_INFO
, sdp
, "%s: Opcode 0x%x not%s\n",
5318 my_name
, opcode
, " supported for wlun");
5319 mk_sense_invalid_opcode(scp
);
5322 if (unlikely(sdebug_strict
)) { /* check cdb against mask */
5326 for (k
= 1; k
< oip
->len_mask
[0] && k
< 16; ++k
) {
5327 rem
= ~oip
->len_mask
[k
] & cmd
[k
];
5329 for (j
= 7; j
>= 0; --j
, rem
<<= 1) {
5333 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, k
, j
);
5338 if (unlikely(!(F_SKIP_UA
& flags
) &&
5339 find_first_bit(devip
->uas_bm
,
5340 SDEBUG_NUM_UAS
) != SDEBUG_NUM_UAS
)) {
5341 errsts
= make_ua(scp
, devip
);
5345 if (unlikely((F_M_ACCESS
& flags
) && atomic_read(&devip
->stopped
))) {
5346 mk_sense_buffer(scp
, NOT_READY
, LOGICAL_UNIT_NOT_READY
, 0x2);
5348 sdev_printk(KERN_INFO
, sdp
, "%s reports: Not ready: "
5349 "%s\n", my_name
, "initializing command "
5351 errsts
= check_condition_result
;
5354 if (sdebug_fake_rw
&& (F_FAKE_RW
& flags
))
5356 if (unlikely(sdebug_every_nth
)) {
5357 if (fake_timeout(scp
))
5358 return 0; /* ignore command: make trouble */
5360 if (likely(oip
->pfp
))
5361 errsts
= oip
->pfp(scp
, devip
); /* calls a resp_* function */
5362 else if (r_pfp
) /* if leaf function ptr NULL, try the root's */
5363 errsts
= r_pfp(scp
, devip
);
5366 return schedule_resp(scp
, devip
, errsts
,
5367 ((F_DELAY_OVERR
& flags
) ? 0 : sdebug_jdelay
));
5369 return schedule_resp(scp
, devip
, check_condition_result
, 0);
5371 return schedule_resp(scp
, NULL
, DID_NO_CONNECT
<< 16, 0);
5374 static struct scsi_host_template sdebug_driver_template
= {
5375 .show_info
= scsi_debug_show_info
,
5376 .write_info
= scsi_debug_write_info
,
5377 .proc_name
= sdebug_proc_name
,
5378 .name
= "SCSI DEBUG",
5379 .info
= scsi_debug_info
,
5380 .slave_alloc
= scsi_debug_slave_alloc
,
5381 .slave_configure
= scsi_debug_slave_configure
,
5382 .slave_destroy
= scsi_debug_slave_destroy
,
5383 .ioctl
= scsi_debug_ioctl
,
5384 .queuecommand
= scsi_debug_queuecommand
,
5385 .change_queue_depth
= sdebug_change_qdepth
,
5386 .eh_abort_handler
= scsi_debug_abort
,
5387 .eh_device_reset_handler
= scsi_debug_device_reset
,
5388 .eh_target_reset_handler
= scsi_debug_target_reset
,
5389 .eh_bus_reset_handler
= scsi_debug_bus_reset
,
5390 .eh_host_reset_handler
= scsi_debug_host_reset
,
5391 .can_queue
= SDEBUG_CANQUEUE
,
5393 .sg_tablesize
= SG_MAX_SEGMENTS
,
5394 .cmd_per_lun
= DEF_CMD_PER_LUN
,
5396 .use_clustering
= DISABLE_CLUSTERING
,
5397 .module
= THIS_MODULE
,
5398 .track_queue_depth
= 1,
5401 static int sdebug_driver_probe(struct device
* dev
)
5404 struct sdebug_host_info
*sdbg_host
;
5405 struct Scsi_Host
*hpnt
;
5408 sdbg_host
= to_sdebug_host(dev
);
5410 sdebug_driver_template
.can_queue
= sdebug_max_queue
;
5411 if (sdebug_clustering
)
5412 sdebug_driver_template
.use_clustering
= ENABLE_CLUSTERING
;
5413 hpnt
= scsi_host_alloc(&sdebug_driver_template
, sizeof(sdbg_host
));
5415 pr_err("scsi_host_alloc failed\n");
5419 if (submit_queues
> nr_cpu_ids
) {
5420 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%d\n",
5421 my_name
, submit_queues
, nr_cpu_ids
);
5422 submit_queues
= nr_cpu_ids
;
5424 /* Decide whether to tell scsi subsystem that we want mq */
5425 /* Following should give the same answer for each host */
5426 sdebug_mq_active
= shost_use_blk_mq(hpnt
) && (submit_queues
> 1);
5427 if (sdebug_mq_active
)
5428 hpnt
->nr_hw_queues
= submit_queues
;
5430 sdbg_host
->shost
= hpnt
;
5431 *((struct sdebug_host_info
**)hpnt
->hostdata
) = sdbg_host
;
5432 if ((hpnt
->this_id
>= 0) && (sdebug_num_tgts
> hpnt
->this_id
))
5433 hpnt
->max_id
= sdebug_num_tgts
+ 1;
5435 hpnt
->max_id
= sdebug_num_tgts
;
5436 /* = sdebug_max_luns; */
5437 hpnt
->max_lun
= SCSI_W_LUN_REPORT_LUNS
+ 1;
5441 switch (sdebug_dif
) {
5443 case SD_DIF_TYPE1_PROTECTION
:
5444 hprot
= SHOST_DIF_TYPE1_PROTECTION
;
5446 hprot
|= SHOST_DIX_TYPE1_PROTECTION
;
5449 case SD_DIF_TYPE2_PROTECTION
:
5450 hprot
= SHOST_DIF_TYPE2_PROTECTION
;
5452 hprot
|= SHOST_DIX_TYPE2_PROTECTION
;
5455 case SD_DIF_TYPE3_PROTECTION
:
5456 hprot
= SHOST_DIF_TYPE3_PROTECTION
;
5458 hprot
|= SHOST_DIX_TYPE3_PROTECTION
;
5463 hprot
|= SHOST_DIX_TYPE0_PROTECTION
;
5467 scsi_host_set_prot(hpnt
, hprot
);
5469 if (have_dif_prot
|| sdebug_dix
)
5470 pr_info("host protection%s%s%s%s%s%s%s\n",
5471 (hprot
& SHOST_DIF_TYPE1_PROTECTION
) ? " DIF1" : "",
5472 (hprot
& SHOST_DIF_TYPE2_PROTECTION
) ? " DIF2" : "",
5473 (hprot
& SHOST_DIF_TYPE3_PROTECTION
) ? " DIF3" : "",
5474 (hprot
& SHOST_DIX_TYPE0_PROTECTION
) ? " DIX0" : "",
5475 (hprot
& SHOST_DIX_TYPE1_PROTECTION
) ? " DIX1" : "",
5476 (hprot
& SHOST_DIX_TYPE2_PROTECTION
) ? " DIX2" : "",
5477 (hprot
& SHOST_DIX_TYPE3_PROTECTION
) ? " DIX3" : "");
5479 if (sdebug_guard
== 1)
5480 scsi_host_set_guard(hpnt
, SHOST_DIX_GUARD_IP
);
5482 scsi_host_set_guard(hpnt
, SHOST_DIX_GUARD_CRC
);
5484 sdebug_verbose
= !!(SDEBUG_OPT_NOISE
& sdebug_opts
);
5485 sdebug_any_injecting_opt
= !!(SDEBUG_OPT_ALL_INJECTING
& sdebug_opts
);
5486 if (sdebug_every_nth
) /* need stats counters for every_nth */
5487 sdebug_statistics
= true;
5488 error
= scsi_add_host(hpnt
, &sdbg_host
->dev
);
5490 pr_err("scsi_add_host failed\n");
5492 scsi_host_put(hpnt
);
5494 scsi_scan_host(hpnt
);
5499 static int sdebug_driver_remove(struct device
* dev
)
5501 struct sdebug_host_info
*sdbg_host
;
5502 struct sdebug_dev_info
*sdbg_devinfo
, *tmp
;
5504 sdbg_host
= to_sdebug_host(dev
);
5507 pr_err("Unable to locate host info\n");
5511 scsi_remove_host(sdbg_host
->shost
);
5513 list_for_each_entry_safe(sdbg_devinfo
, tmp
, &sdbg_host
->dev_info_list
,
5515 list_del(&sdbg_devinfo
->dev_list
);
5516 kfree(sdbg_devinfo
);
5519 scsi_host_put(sdbg_host
->shost
);
5523 static int pseudo_lld_bus_match(struct device
*dev
,
5524 struct device_driver
*dev_driver
)
5529 static struct bus_type pseudo_lld_bus
= {
5531 .match
= pseudo_lld_bus_match
,
5532 .probe
= sdebug_driver_probe
,
5533 .remove
= sdebug_driver_remove
,
5534 .drv_groups
= sdebug_drv_groups
,