scsi_debug: uuid for lu name
drivers/scsi/scsi_debug.c
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2016 Douglas Gilbert
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * For documentation see http://sg.danny.cz/sg/sdebug26.html
 *
 */


#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "1.86"
static const char *sdebug_version_date = "20160430";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_MB 8
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

#define SDEBUG_LUN_0_VAL 0

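/* Example invocation (hypothetical values):
 *     modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 * overrides the defaults above: 1 host with 2 targets, each presenting
 * 4 logical units, all sharing a 256 MiB ram store.
 */
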
/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE 1
#define SDEBUG_OPT_MEDIUM_ERR 2
#define SDEBUG_OPT_TIMEOUT 4
#define SDEBUG_OPT_RECOVERED_ERR 8
#define SDEBUG_OPT_TRANSPORT_ERR 16
#define SDEBUG_OPT_DIF_ERR 32
#define SDEBUG_OPT_DIX_ERR 64
#define SDEBUG_OPT_MAC_TIMEOUT 128
#define SDEBUG_OPT_SHORT_TRANSFER 0x100
#define SDEBUG_OPT_Q_NOISE 0x200
#define SDEBUG_OPT_ALL_TSF 0x400
#define SDEBUG_OPT_RARE_TSF 0x800
#define SDEBUG_OPT_N_WCE 0x1000
#define SDEBUG_OPT_RESET_NOISE 0x2000
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
                              SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
                                  SDEBUG_OPT_TRANSPORT_ERR | \
                                  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
                                  SDEBUG_OPT_SHORT_TRANSFER)
/* When "every_nth" > 0 then modulo "every_nth" commands:
 * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
 * - a RECOVERED_ERROR is simulated on successful read and write
 *   commands if SDEBUG_OPT_RECOVERED_ERR is set.
 * - a TRANSPORT_ERROR is simulated on successful read and write
 *   commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 *
 * When "every_nth" < 0 then after "- every_nth" commands:
 * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
 * - a RECOVERED_ERROR is simulated on successful read and write
 *   commands if SDEBUG_OPT_RECOVERED_ERR is set.
 * - a TRANSPORT_ERROR is simulated on successful read and write
 *   commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 * This will continue on every subsequent command until some other action
 * occurs (e.g. the user writing a new value (other than -1 or 1) to
 * every_nth via sysfs).
 */
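/* Example (hypothetical values): loading with every_nth=100 and opts=4
 * (SDEBUG_OPT_TIMEOUT) makes every 100th command "disappear", which is a
 * simple way to exercise the SCSI mid-level error handler.
 */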

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7

/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */

/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
 * or "peripheral device" addressing (value 0) */
#define SAM2_LUN_ADDRESS_METHOD 0

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
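/* For example (hypothetical h:c:t:l shown), the queue depth of one
 * simulated device can be lowered at run time with:
 *     echo 16 > /sys/class/scsi_device/0:0:0:0/device/queue_depth
 */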
#define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN 255

#define F_D_IN 1
#define F_D_OUT 2
#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
#define F_D_UNKN 8
#define F_RL_WLUN_OK 0x10
#define F_SKIP_UA 0x20
#define F_DELAY_OVERR 0x40
#define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH 0x100 /* as used by variable length cdbs */
#define F_INV_OP 0x200
#define F_FAKE_RW 0x400
#define F_M_ACCESS 0x800 /* media access */

#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32


struct sdebug_dev_info {
        struct list_head dev_list;
        unsigned int channel;
        unsigned int target;
        u64 lun;
        uuid_be lu_name;
        struct sdebug_host_info *sdbg_host;
        unsigned long uas_bm[1];
        atomic_t num_in_q;
        atomic_t stopped;
        bool used;
};
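/* Note: lu_name holds a per logical unit UUID; it is only reported (via
 * the Device Identification VPD page) when the uuid_ctl module parameter
 * is non-zero, see inquiry_vpd_83() below.
 */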

struct sdebug_host_info {
        struct list_head host_list;
        struct Scsi_Host *shost;
        struct device dev;
        struct list_head dev_info_list;
};

#define to_sdebug_host(d) \
        container_of(d, struct sdebug_host_info, dev)

struct sdebug_defer {
        struct hrtimer hrt;
        struct execute_work ew;
        int sqa_idx; /* index of sdebug_queue array */
        int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
        int issuing_cpu;
};

struct sdebug_queued_cmd {
        /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
         * instance indicates this slot is in use.
         */
        struct sdebug_defer *sd_dp;
        struct scsi_cmnd *a_cmnd;
        unsigned int inj_recovered:1;
        unsigned int inj_transport:1;
        unsigned int inj_dif:1;
        unsigned int inj_dix:1;
        unsigned int inj_short:1;
};

struct sdebug_queue {
        struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
        unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
        spinlock_t qc_lock;
        atomic_t blocked; /* to temporarily stop more being queued */
};

static atomic_t sdebug_cmnd_count; /* number of incoming commands */
static atomic_t sdebug_completions; /* count of deferred completions */
static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */

struct opcode_info_t {
        u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */
                         /* for terminating element */
        u8 opcode; /* if num_attached > 0, preferred */
        u16 sa; /* service action */
        u32 flags; /* OR-ed set of SDEB_F_* */
        int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
        const struct opcode_info_t *arrp; /* num_attached elements or NULL */
        u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */
                         /* ignore cdb bytes after position 15 */
};
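/* For example, the INQUIRY entry in opcode_info_arr below carries
 * len_mask {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, ...}: a 6 byte cdb where
 * 0xe3 gives the bits that may validly be set in cdb[1].
 */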

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
        SDEB_I_INVALID_OPCODE = 0,
        SDEB_I_INQUIRY = 1,
        SDEB_I_REPORT_LUNS = 2,
        SDEB_I_REQUEST_SENSE = 3,
        SDEB_I_TEST_UNIT_READY = 4,
        SDEB_I_MODE_SENSE = 5, /* 6, 10 */
        SDEB_I_MODE_SELECT = 6, /* 6, 10 */
        SDEB_I_LOG_SENSE = 7,
        SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
        SDEB_I_READ = 9, /* 6, 10, 12, 16 */
        SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
        SDEB_I_START_STOP = 11,
        SDEB_I_SERV_ACT_IN = 12, /* 12, 16 */
        SDEB_I_SERV_ACT_OUT = 13, /* 12, 16 */
        SDEB_I_MAINT_IN = 14,
        SDEB_I_MAINT_OUT = 15,
        SDEB_I_VERIFY = 16, /* 10 only */
        SDEB_I_VARIABLE_LEN = 17,
        SDEB_I_RESERVE = 18, /* 6, 10 */
        SDEB_I_RELEASE = 19, /* 6, 10 */
        SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
        SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
        SDEB_I_ATA_PT = 22, /* 12, 16 */
        SDEB_I_SEND_DIAG = 23,
        SDEB_I_UNMAP = 24,
        SDEB_I_XDWRITEREAD = 25, /* 10 only */
        SDEB_I_WRITE_BUFFER = 26,
        SDEB_I_WRITE_SAME = 27, /* 10, 16 */
        SDEB_I_SYNC_CACHE = 28, /* 10 only */
        SDEB_I_COMP_WRITE = 29,
        SDEB_I_LAST_ELEMENT = 30, /* keep this last */
};


static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
        SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
            0, 0, 0, 0,
        SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
        0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
            SDEB_I_RELEASE,
        0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
            SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
        0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
        SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
        0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
        0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
        0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
        0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
            SDEB_I_RELEASE,
        0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
        0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
        SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
        0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
        SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
            SDEB_I_MAINT_OUT, 0, 0, 0,
        SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
            0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
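/* Example lookup: cdb[0]=0x28 (READ(10)) indexes to SDEB_I_READ above;
 * opcode_info_arr[SDEB_I_READ] below lists READ(16) as the preferred
 * opcode with READ(10), READ(6) and READ(12) attached via read_iarr.
 */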

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);

static const struct opcode_info_t msense_iarr[1] = {
        {0, 0x1a, 0, F_D_IN, NULL, NULL,
            {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[1] = {
        {0, 0x15, 0, F_D_OUT, NULL, NULL,
            {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[3] = {
        {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
            {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
             0, 0, 0, 0} },
        {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
            {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
            {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
             0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[3] = {
        {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 10 */
            {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
             0, 0, 0, 0} },
        {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 6 */
            {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 12 */
            {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
             0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_iarr[1] = {
        {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
            {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
             0xff, 0xff, 0xff, 0, 0xc7} },
};

static const struct opcode_info_t vl_iarr[1] = { /* VARIABLE LENGTH */
        {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
            NULL, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
                   0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
};

static const struct opcode_info_t maint_in_iarr[2] = {
        {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
            {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
             0xc7, 0, 0, 0, 0} },
        {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
            {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
             0, 0} },
};

static const struct opcode_info_t write_same_iarr[1] = {
        {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
            {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
             0xff, 0xff, 0xff, 0x1f, 0xc7} },
};

static const struct opcode_info_t reserve_iarr[1] = {
        {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
            {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[1] = {
        {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
            {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
/* 0 */
        {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
            {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
            {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
            {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
             0, 0} },
        {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
            {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
            {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
            {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
             0} },
        {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
            {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
        {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
            {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
             0, 0, 0} },
        {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
            {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
             0, 0} },
        {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
            {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
             0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */
/* 10 */
        {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
            {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
             0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */
        {0, 0x1b, 0, 0, resp_start_stop, NULL, /* START STOP UNIT */
            {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
            {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
             0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */
        {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
            {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
            {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
             0} },
        {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
            {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
            {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
             0, 0, 0, 0, 0, 0} },
        {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
            vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
                      0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
        {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
            {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
             0} },
        {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
            {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
             0} },
/* 20 */
        {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
            {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
            {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
            {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0x1d, 0, F_D_OUT, NULL, NULL, /* SEND DIAGNOSTIC */
            {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
            {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
        {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
            NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
                   0, 0, 0, 0, 0, 0} },
        {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
            {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
             0, 0, 0, 0} }, /* WRITE_BUFFER */
        {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
            write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
                              0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
        {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
            {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
             0, 0, 0, 0} },
        {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
            {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
             0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */

/* 30 */
        {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
            {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static int sdebug_add_host = DEF_NUM_HOST;
static int sdebug_ato = DEF_ATO;
static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_mq_active;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity; /* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads; /* heads per disk */
static int sdebug_cylinders_per; /* cylinders per surface */
static int sdebug_sectors_per; /* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static unsigned char *fake_storep; /* ramdisk storage */
static struct sd_dif_tuple *dif_storep; /* protection info */
static void *map_storep; /* provisioning map */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
        .name = sdebug_proc_name,
        .bus = &pseudo_lld_bus,
};

static const int check_condition_result =
        (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
        (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
        (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;


/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
        return 0 == sdebug_fake_rw &&
                (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *fake_store(unsigned long long lba)
{
        lba = do_div(lba, sdebug_store_sectors);

        return fake_storep + lba * sdebug_sector_size;
}

static struct sd_dif_tuple *dif_store(sector_t sector)
{
        sector = sector_div(sector, sdebug_store_sectors);

        return dif_storep + sector;
}
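/* Both helpers above reduce the requested LBA modulo sdebug_store_sectors,
 * so a device whose advertised capacity (virtual_gb) exceeds the backing
 * ram store wraps around rather than overrunning the allocation.
 */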

static void sdebug_max_tgts_luns(void)
{
        struct sdebug_host_info *sdbg_host;
        struct Scsi_Host *hpnt;

        spin_lock(&sdebug_host_list_lock);
        list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
                hpnt = sdbg_host->shost;
                if ((hpnt->this_id >= 0) &&
                    (sdebug_num_tgts > hpnt->this_id))
                        hpnt->max_id = sdebug_num_tgts + 1;
                else
                        hpnt->max_id = sdebug_num_tgts;
                /* sdebug_max_luns; */
                hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
        }
        spin_unlock(&sdebug_host_list_lock);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
                                 enum sdeb_cmd_data c_d,
                                 int in_byte, int in_bit)
{
        unsigned char *sbuff;
        u8 sks[4];
        int sl, asc;

        sbuff = scp->sense_buffer;
        if (!sbuff) {
                sdev_printk(KERN_ERR, scp->device,
                            "%s: sense_buffer is NULL\n", __func__);
                return;
        }
        asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
        memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
        scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
        memset(sks, 0, sizeof(sks));
        sks[0] = 0x80;
        if (c_d)
                sks[0] |= 0x40;
        if (in_bit >= 0) {
                sks[0] |= 0x8;
                sks[0] |= 0x7 & in_bit;
        }
        put_unaligned_be16(in_byte, sks + 1);
        if (sdebug_dsense) {
                sl = sbuff[7] + 8;
                sbuff[7] = sl;
                sbuff[sl] = 0x2;
                sbuff[sl + 1] = 0x6;
                memcpy(sbuff + sl + 4, sks, 3);
        } else
                memcpy(sbuff + 15, sks, 3);
        if (sdebug_verbose)
                sdev_printk(KERN_INFO, scp->device,
                            "%s: [sense_key,asc,ascq]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
                            my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
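/* Worked example: mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1) yields
 * sks[0] = 0x80 | 0x40 | 0x8 | 0x1 = 0xc9 (SKSV, C/D, BPV, bit pointer 1)
 * and sks[1..2] = 0x0001, i.e. "invalid field in cdb byte 1, bit 1".
 */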

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
        unsigned char *sbuff;

        sbuff = scp->sense_buffer;
        if (!sbuff) {
                sdev_printk(KERN_ERR, scp->device,
                            "%s: sense_buffer is NULL\n", __func__);
                return;
        }
        memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);

        scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);

        if (sdebug_verbose)
                sdev_printk(KERN_INFO, scp->device,
                            "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
                            my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
        mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
        if (sdebug_verbose) {
                if (0x1261 == cmd)
                        sdev_printk(KERN_INFO, dev,
                                    "%s: BLKFLSBUF [0x1261]\n", __func__);
                else if (0x5331 == cmd)
                        sdev_printk(KERN_INFO, dev,
                                    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
                                    __func__);
                else
                        sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
                                    __func__, cmd);
        }
        return -EINVAL;
        /* return -ENOTTY; // correct return but upsets fdisk */
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
        struct sdebug_host_info *sdhp;
        struct sdebug_dev_info *dp;

        spin_lock(&sdebug_host_list_lock);
        list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
                list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
                        if ((devip->sdbg_host == dp->sdbg_host) &&
                            (devip->target == dp->target))
                                clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
                }
        }
        spin_unlock(&sdebug_host_list_lock);
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
        int k;

        k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
        if (k != SDEBUG_NUM_UAS) {
                const char *cp = NULL;

                switch (k) {
                case SDEBUG_UA_POR:
                        mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
                                        POWER_ON_RESET_ASCQ);
                        if (sdebug_verbose)
                                cp = "power on reset";
                        break;
                case SDEBUG_UA_BUS_RESET:
                        mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
                                        BUS_RESET_ASCQ);
                        if (sdebug_verbose)
                                cp = "bus reset";
                        break;
                case SDEBUG_UA_MODE_CHANGED:
                        mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
                                        MODE_CHANGED_ASCQ);
                        if (sdebug_verbose)
                                cp = "mode parameters changed";
                        break;
                case SDEBUG_UA_CAPACITY_CHANGED:
                        mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
                                        CAPACITY_CHANGED_ASCQ);
                        if (sdebug_verbose)
                                cp = "capacity data changed";
                        break;
                case SDEBUG_UA_MICROCODE_CHANGED:
                        mk_sense_buffer(scp, UNIT_ATTENTION,
                                        TARGET_CHANGED_ASC,
                                        MICROCODE_CHANGED_ASCQ);
                        if (sdebug_verbose)
                                cp = "microcode has been changed";
                        break;
                case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
                        mk_sense_buffer(scp, UNIT_ATTENTION,
                                        TARGET_CHANGED_ASC,
                                        MICROCODE_CHANGED_WO_RESET_ASCQ);
                        if (sdebug_verbose)
                                cp = "microcode has been changed without reset";
                        break;
                case SDEBUG_UA_LUNS_CHANGED:
                        /*
                         * SPC-3 behavior is to report a UNIT ATTENTION with
                         * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
                         * on the target, until a REPORT LUNS command is
                         * received. SPC-4 behavior is to report it only once.
                         * NOTE: sdebug_scsi_level does not use the same
                         * values as struct scsi_device->scsi_level.
                         */
                        if (sdebug_scsi_level >= 6) /* SPC-4 and above */
                                clear_luns_changed_on_target(devip);
                        mk_sense_buffer(scp, UNIT_ATTENTION,
                                        TARGET_CHANGED_ASC,
                                        LUNS_CHANGED_ASCQ);
                        if (sdebug_verbose)
                                cp = "reported luns data has changed";
                        break;
                default:
                        pr_warn("unexpected unit attention code=%d\n", k);
                        if (sdebug_verbose)
                                cp = "unknown";
                        break;
                }
                clear_bit(k, devip->uas_bm);
                if (sdebug_verbose)
                        sdev_printk(KERN_INFO, scp->device,
                                    "%s reports: Unit attention: %s\n",
                                    my_name, cp);
                return check_condition_result;
        }
        return 0;
}

/* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid. */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
                                int arr_len)
{
        int act_len;
        struct scsi_data_buffer *sdb = scsi_in(scp);

        if (!sdb->length)
                return 0;
        if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
                return DID_ERROR << 16;

        act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
                                      arr, arr_len);
        sdb->resid = scsi_bufflen(scp) - act_len;

        return 0;
}

/* Returns number of bytes fetched into 'arr' or -1 if error. */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
                               int arr_len)
{
        if (!scsi_bufflen(scp))
                return 0;
        if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
                return -1;

        return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}


static const char *inq_vendor_id = "Linux   ";
static const char *inq_product_id = "scsi_debug      ";
static const char *inq_product_rev = "0186"; /* version less '.' */
static const u64 naa5_comp_a = 0x5222222000000000ULL;
static const u64 naa5_comp_b = 0x5333333000000000ULL;
static const u64 naa5_comp_c = 0x5111111000000000ULL;

/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
                          int target_dev_id, int dev_id_num,
                          const char *dev_id_str, int dev_id_str_len,
                          const uuid_be *lu_name)
{
        int num, port_a;
        char b[32];

        port_a = target_dev_id + 1;
        /* T10 vendor identifier field format (faked) */
        arr[0] = 0x2; /* ASCII */
        arr[1] = 0x1;
        arr[2] = 0x0;
        memcpy(&arr[4], inq_vendor_id, 8);
        memcpy(&arr[12], inq_product_id, 16);
        memcpy(&arr[28], dev_id_str, dev_id_str_len);
        num = 8 + 16 + dev_id_str_len;
        arr[3] = num;
        num += 4;
        if (dev_id_num >= 0) {
                if (sdebug_uuid_ctl) {
                        /* Locally assigned UUID */
                        arr[num++] = 0x1;  /* binary (not necessarily sas) */
                        arr[num++] = 0xa;  /* PIV=0, lu, uuid */
                        arr[num++] = 0x0;
                        arr[num++] = 0x12;
                        arr[num++] = 0x10; /* uuid type=1, locally assigned */
                        arr[num++] = 0x0;
                        memcpy(arr + num, lu_name, 16);
                        num += 16;
                } else {
                        /* NAA-5, Logical unit identifier (binary) */
                        arr[num++] = 0x1;  /* binary (not necessarily sas) */
                        arr[num++] = 0x3;  /* PIV=0, lu, naa */
                        arr[num++] = 0x0;
                        arr[num++] = 0x8;
                        put_unaligned_be64(naa5_comp_b + dev_id_num, arr + num);
                        num += 8;
                }
                /* Target relative port number */
                arr[num++] = 0x61; /* proto=sas, binary */
                arr[num++] = 0x94; /* PIV=1, target port, rel port */
                arr[num++] = 0x0;  /* reserved */
                arr[num++] = 0x4;  /* length */
                arr[num++] = 0x0;  /* reserved */
                arr[num++] = 0x0;  /* reserved */
                arr[num++] = 0x0;
                arr[num++] = 0x1;  /* relative port A */
        }
        /* NAA-5, Target port identifier */
        arr[num++] = 0x61; /* proto=sas, binary */
        arr[num++] = 0x93; /* piv=1, target port, naa */
        arr[num++] = 0x0;
        arr[num++] = 0x8;
        put_unaligned_be64(naa5_comp_a + port_a, arr + num);
        num += 8;
        /* NAA-5, Target port group identifier */
        arr[num++] = 0x61; /* proto=sas, binary */
        arr[num++] = 0x95; /* piv=1, target port group id */
        arr[num++] = 0x0;
        arr[num++] = 0x4;
        arr[num++] = 0;
        arr[num++] = 0;
        put_unaligned_be16(port_group_id, arr + num);
        num += 2;
        /* NAA-5, Target device identifier */
        arr[num++] = 0x61; /* proto=sas, binary */
        arr[num++] = 0xa3; /* piv=1, target device, naa */
        arr[num++] = 0x0;
        arr[num++] = 0x8;
        put_unaligned_be64(naa5_comp_a + target_dev_id, arr + num);
        num += 8;
        /* SCSI name string: Target device identifier */
        arr[num++] = 0x63; /* proto=sas, UTF-8 */
        arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
        arr[num++] = 0x0;
        arr[num++] = 24;
        memcpy(arr + num, "naa.52222220", 12);
        num += 12;
        snprintf(b, sizeof(b), "%08X", target_dev_id);
        memcpy(arr + num, b, 8);
        num += 8;
        memset(arr + num, 0, 4);
        num += 4;
        return num;
}
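/* The locally assigned UUID designator built above follows SPC-5: a four
 * byte header (code set 0x1=binary; 0xa=association lu, designator type
 * UUID; reserved; length 0x12), then 0x10 for "UUID type 1, locally
 * assigned", a reserved byte and the 16 byte UUID itself.
 */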

static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
        memcpy(arr, vpd84_data, sizeof(vpd84_data));
        return sizeof(vpd84_data);
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
        int num = 0;
        const char *na1 = "https://www.kernel.org/config";
        const char *na2 = "http://www.kernel.org/log";
        int plen, olen;

        arr[num++] = 0x1; /* lu, storage config */
        arr[num++] = 0x0; /* reserved */
        arr[num++] = 0x0;
        olen = strlen(na1);
        plen = olen + 1;
        if (plen % 4)
                plen = ((plen / 4) + 1) * 4;
        arr[num++] = plen; /* length, null terminated, padded */
        memcpy(arr + num, na1, olen);
        memset(arr + num + olen, 0, plen - olen);
        num += plen;

        arr[num++] = 0x4; /* lu, logging */
        arr[num++] = 0x0; /* reserved */
        arr[num++] = 0x0;
        olen = strlen(na2);
        plen = olen + 1;
        if (plen % 4)
                plen = ((plen / 4) + 1) * 4;
        arr[num++] = plen; /* length, null terminated, padded */
        memcpy(arr + num, na2, olen);
        memset(arr + num + olen, 0, plen - olen);
        num += plen;

        return num;
}
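/* Padding example: strlen(na1) is 29, so plen becomes 30 and is rounded
 * up to 32, the next multiple of 4; the memset() zero fills the NUL
 * terminator and pad bytes after the copied string.
 */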

/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
        int num = 0;
        int port_a, port_b;

        port_a = target_dev_id + 1;
        port_b = port_a + 1;
        arr[num++] = 0x0; /* reserved */
        arr[num++] = 0x0; /* reserved */
        arr[num++] = 0x0;
        arr[num++] = 0x1; /* relative port 1 (primary) */
        memset(arr + num, 0, 6);
        num += 6;
        arr[num++] = 0x0;
        arr[num++] = 12; /* length tp descriptor */
        /* naa-5 target port identifier (A) */
        arr[num++] = 0x61; /* proto=sas, binary */
        arr[num++] = 0x93; /* PIV=1, target port, NAA */
        arr[num++] = 0x0;  /* reserved */
        arr[num++] = 0x8;  /* length */
        put_unaligned_be64(naa5_comp_a + port_a, arr + num);
        num += 8;
        arr[num++] = 0x0; /* reserved */
        arr[num++] = 0x0; /* reserved */
        arr[num++] = 0x0;
        arr[num++] = 0x2; /* relative port 2 (secondary) */
        memset(arr + num, 0, 6);
        num += 6;
        arr[num++] = 0x0;
        arr[num++] = 12; /* length tp descriptor */
        /* naa-5 target port identifier (B) */
        arr[num++] = 0x61; /* proto=sas, binary */
        arr[num++] = 0x93; /* PIV=1, target port, NAA */
        arr[num++] = 0x0;  /* reserved */
        arr[num++] = 0x8;  /* length */
        put_unaligned_be64(naa5_comp_a + port_b, arr + num);
        num += 8;

        return num;
}


static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
        memcpy(arr, vpd89_data, sizeof(vpd89_data));
        return sizeof(vpd89_data);
}


static unsigned char vpdb0_data[] = {
/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
        unsigned int gran;

        memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

        /* Optimal transfer length granularity */
        gran = 1 << sdebug_physblk_exp;
        put_unaligned_be16(gran, arr + 2);

        /* Maximum Transfer Length */
        if (sdebug_store_sectors > 0x400)
                put_unaligned_be32(sdebug_store_sectors, arr + 4);

        /* Optimal Transfer Length */
        put_unaligned_be32(sdebug_opt_blks, &arr[8]);

        if (sdebug_lbpu) {
                /* Maximum Unmap LBA Count */
                put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

                /* Maximum Unmap Block Descriptor Count */
                put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
        }

        /* Unmap Granularity Alignment */
        if (sdebug_unmap_alignment) {
                put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
                arr[28] |= 0x80; /* UGAVALID */
        }

        /* Optimal Unmap Granularity */
        put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

        /* Maximum WRITE SAME Length */
        put_unaligned_be64(sdebug_write_same_length, &arr[32]);

        return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}

/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(unsigned char *arr)
{
        memset(arr, 0, 0x3c);
        arr[0] = 0;
        arr[1] = 1; /* non rotating medium (e.g. solid state) */
        arr[2] = 0;
        arr[3] = 5; /* less than 1.8" */

        return 0x3c;
}

/* Logical block provisioning VPD page (SBC-4) */
static int inquiry_vpd_b2(unsigned char *arr)
{
        memset(arr, 0, 0x4);
        arr[0] = 0; /* threshold exponent */
        if (sdebug_lbpu)
                arr[1] = 1 << 7;
        if (sdebug_lbpws)
                arr[1] |= 1 << 6;
        if (sdebug_lbpws10)
                arr[1] |= 1 << 5;
        if (sdebug_lbprz && scsi_debug_lbp())
                arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
        /* anc_sup=0; dp=0 (no provisioning group descriptor) */
        /* minimum_percentage=0; provisioning_type=0 (unknown) */
        /* threshold_percentage=0 */
        return 0x4;
}

#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
        unsigned char pq_pdt;
        unsigned char *arr;
        unsigned char *cmd = scp->cmnd;
        int alloc_len, n, ret;
        bool have_wlun, is_disk;

        alloc_len = get_unaligned_be16(cmd + 3);
        arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
        if (!arr)
                return DID_REQUEUE << 16;
        is_disk = (sdebug_ptype == TYPE_DISK);
        have_wlun = scsi_is_wlun(scp->device->lun);
        if (have_wlun)
                pq_pdt = TYPE_WLUN; /* present, wlun */
        else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
                pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
        else
                pq_pdt = (sdebug_ptype & 0x1f);
        arr[0] = pq_pdt;
        if (0x2 & cmd[1]) { /* CMDDT bit set */
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
                kfree(arr);
                return check_condition_result;
        } else if (0x1 & cmd[1]) { /* EVPD bit set */
                int lu_id_num, port_group_id, target_dev_id, len;
                char lu_id_str[6];
                int host_no = devip->sdbg_host->shost->host_no;

                port_group_id = (((host_no + 1) & 0x7f) << 8) +
                    (devip->channel & 0x7f);
                if (sdebug_vpd_use_hostno == 0)
                        host_no = 0;
                lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
                            (devip->target * 1000) + devip->lun);
                target_dev_id = ((host_no + 1) * 2000) +
                                (devip->target * 1000) - 3;
                len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
                if (0 == cmd[2]) { /* supported vital product data pages */
                        arr[1] = cmd[2];        /*sanity */
                        n = 4;
                        arr[n++] = 0x0;   /* this page */
                        arr[n++] = 0x80;  /* unit serial number */
                        arr[n++] = 0x83;  /* device identification */
                        arr[n++] = 0x84;  /* software interface ident. */
                        arr[n++] = 0x85;  /* management network addresses */
                        arr[n++] = 0x86;  /* extended inquiry */
                        arr[n++] = 0x87;  /* mode page policy */
                        arr[n++] = 0x88;  /* SCSI ports */
                        if (is_disk) {    /* SBC only */
                                arr[n++] = 0x89;  /* ATA information */
                                arr[n++] = 0xb0;  /* Block limits */
                                arr[n++] = 0xb1;  /* Block characteristics */
                                arr[n++] = 0xb2;  /* Logical Block Prov */
                        }
                        arr[3] = n - 4;   /* number of supported VPD pages */
                } else if (0x80 == cmd[2]) { /* unit serial number */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = len;
                        memcpy(&arr[4], lu_id_str, len);
                } else if (0x83 == cmd[2]) { /* device identification */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
                                                target_dev_id, lu_id_num,
                                                lu_id_str, len,
                                                &devip->lu_name);
                } else if (0x84 == cmd[2]) { /* Software interface ident. */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = inquiry_vpd_84(&arr[4]);
                } else if (0x85 == cmd[2]) { /* Management network addresses */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = inquiry_vpd_85(&arr[4]);
                } else if (0x86 == cmd[2]) { /* extended inquiry */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = 0x3c;  /* number of following entries */
                        if (sdebug_dif == SD_DIF_TYPE3_PROTECTION)
                                arr[4] = 0x4;   /* SPT: GRD_CHK:1 */
                        else if (have_dif_prot)
                                arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
                        else
                                arr[4] = 0x0;   /* no protection stuff */
                        arr[5] = 0x7;   /* head of q, ordered + simple q's */
                } else if (0x87 == cmd[2]) { /* mode page policy */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = 0x8;   /* number of following entries */
                        arr[4] = 0x2;   /* disconnect-reconnect mp */
                        arr[6] = 0x80;  /* mlus, shared */
                        arr[8] = 0x18;  /* protocol specific lu */
                        arr[10] = 0x82; /* mlus, per initiator port */
                } else if (0x88 == cmd[2]) { /* SCSI Ports */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
                } else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
                        arr[1] = cmd[2];        /*sanity */
                        n = inquiry_vpd_89(&arr[4]);
                        put_unaligned_be16(n, arr + 2);
                } else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = inquiry_vpd_b0(&arr[4]);
                } else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = inquiry_vpd_b1(&arr[4]);
                } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = inquiry_vpd_b2(&arr[4]);
                } else {
                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
                        kfree(arr);
                        return check_condition_result;
                }
                len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
                ret = fill_from_dev_buffer(scp, arr,
                                           min(len, SDEBUG_MAX_INQ_ARR_SZ));
                kfree(arr);
                return ret;
        }
        /* drops through here for a standard inquiry */
        arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
        arr[2] = sdebug_scsi_level;
        arr[3] = 2;    /* response_data_format==2 */
        arr[4] = SDEBUG_LONG_INQ_SZ - 5;
        arr[5] = (int)have_dif_prot; /* PROTECT bit */
        if (sdebug_vpd_use_hostno == 0)
                arr[5] |= 0x10; /* claim: implicit TGPS */
        arr[6] = 0x10; /* claim: MultiP */
        /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
        arr[7] = 0xa;  /* claim: LINKED + CMDQUE */
        memcpy(&arr[8], inq_vendor_id, 8);
        memcpy(&arr[16], inq_product_id, 16);
        memcpy(&arr[32], inq_product_rev, 4);
        /* version descriptors (2 bytes each) follow */
        put_unaligned_be16(0xc0, arr + 58);  /* SAM-6 no version claimed */
        put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
        n = 62;
        if (is_disk) { /* SBC-4 no version claimed */
                put_unaligned_be16(0x600, arr + n);
                n += 2;
        } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
                put_unaligned_be16(0x525, arr + n);
                n += 2;
        }
        put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
        ret = fill_from_dev_buffer(scp, arr,
                                   min(alloc_len, SDEBUG_LONG_INQ_SZ));
        kfree(arr);
        return ret;
}

static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
                                   0, 0, 0x0, 0x0};

static int resp_requests(struct scsi_cmnd *scp,
                         struct sdebug_dev_info *devip)
{
        unsigned char *sbuff;
        unsigned char *cmd = scp->cmnd;
        unsigned char arr[SCSI_SENSE_BUFFERSIZE];
        bool dsense;
        int len = 18;

        memset(arr, 0, sizeof(arr));
        dsense = !!(cmd[1] & 1);
        sbuff = scp->sense_buffer;
        if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
                if (dsense) {
                        arr[0] = 0x72;
                        arr[1] = 0x0;  /* NO_SENSE in sense_key */
                        arr[2] = THRESHOLD_EXCEEDED;
                        arr[3] = 0xff; /* TEST set and MRIE==6 */
                        len = 8;
                } else {
                        arr[0] = 0x70;
                        arr[2] = 0x0;  /* NO_SENSE in sense_key */
                        arr[7] = 0xa;  /* 18 byte sense buffer */
                        arr[12] = THRESHOLD_EXCEEDED;
                        arr[13] = 0xff; /* TEST set and MRIE==6 */
                }
        } else {
                memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
                if (arr[0] >= 0x70 && dsense == sdebug_dsense)
                        ;       /* have sense and formats match */
                else if (arr[0] <= 0x70) {
                        if (dsense) {
                                memset(arr, 0, 8);
                                arr[0] = 0x72;
                                len = 8;
                        } else {
                                memset(arr, 0, 18);
                                arr[0] = 0x70;
                                arr[7] = 0xa;
                        }
                } else if (dsense) {
                        memset(arr, 0, 8);
                        arr[0] = 0x72;
                        arr[1] = sbuff[2];  /* sense key */
                        arr[2] = sbuff[12]; /* asc */
                        arr[3] = sbuff[13]; /* ascq */
                        len = 8;
                } else {
                        memset(arr, 0, 18);
                        arr[0] = 0x70;
                        arr[2] = sbuff[1];  /* sense key */
                        arr[7] = 0xa;
                        arr[12] = sbuff[2]; /* asc */
                        arr[13] = sbuff[3]; /* ascq */
                }
        }
        mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
        return fill_from_dev_buffer(scp, arr, len);
}
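/* Reminder on the two sense formats juggled above: fixed format (0x70)
 * keeps the sense key in byte 2 and asc/ascq in bytes 12 and 13, while
 * descriptor format (0x72) keeps key/asc/ascq in bytes 1, 2 and 3.
 */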

static int resp_start_stop(struct scsi_cmnd *scp,
                           struct sdebug_dev_info *devip)
{
        unsigned char *cmd = scp->cmnd;
        int power_cond, stop;

        power_cond = (cmd[4] & 0xf0) >> 4;
        if (power_cond) {
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
                return check_condition_result;
        }
        stop = !(cmd[4] & 1);
        atomic_xchg(&devip->stopped, stop);
        return 0;
}

static sector_t get_sdebug_capacity(void)
{
        static const unsigned int gibibyte = 1073741824;

        if (sdebug_virtual_gb > 0)
                return (sector_t)sdebug_virtual_gb *
                        (gibibyte / sdebug_sector_size);
        else
                return sdebug_store_sectors;
}
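/* e.g. loading with virtual_gb=1 and the default 512 byte sectors makes
 * the device report 1073741824 / 512 = 2097152 logical blocks, however
 * small the real ram store (dev_size_mb) happens to be.
 */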

#define SDEBUG_READCAP_ARR_SZ 8
static int resp_readcap(struct scsi_cmnd *scp,
                        struct sdebug_dev_info *devip)
{
        unsigned char arr[SDEBUG_READCAP_ARR_SZ];
        unsigned int capac;

        /* following just in case virtual_gb changed */
        sdebug_capacity = get_sdebug_capacity();
        memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
        if (sdebug_capacity < 0xffffffff) {
                capac = (unsigned int)sdebug_capacity - 1;
                put_unaligned_be32(capac, arr + 0);
        } else
                put_unaligned_be32(0xffffffff, arr + 0);
        put_unaligned_be16(sdebug_sector_size, arr + 6);
        return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}

#define SDEBUG_READCAP16_ARR_SZ 32
static int resp_readcap16(struct scsi_cmnd *scp,
                          struct sdebug_dev_info *devip)
{
        unsigned char *cmd = scp->cmnd;
        unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
        int alloc_len;

        alloc_len = get_unaligned_be32(cmd + 10);
        /* following just in case virtual_gb changed */
        sdebug_capacity = get_sdebug_capacity();
        memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
        put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
        put_unaligned_be32(sdebug_sector_size, arr + 8);
        arr[13] = sdebug_physblk_exp & 0xf;
        arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

        if (scsi_debug_lbp()) {
                arr[14] |= 0x80; /* LBPME */
                /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
                 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
                 * in the wider field maps to 0 in this field.
                 */
                if (sdebug_lbprz & 1) /* precisely what the draft requires */
                        arr[14] |= 0x40;
        }

        arr[15] = sdebug_lowest_aligned & 0xff;

        if (have_dif_prot) {
                arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
                arr[12] |= 1; /* PROT_EN */
        }

        return fill_from_dev_buffer(scp, arr,
                                    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
1543
1544 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1545
1546 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1547 struct sdebug_dev_info * devip)
1548 {
1549 unsigned char *cmd = scp->cmnd;
1550 unsigned char * arr;
1551 int host_no = devip->sdbg_host->shost->host_no;
1552 int n, ret, alen, rlen;
1553 int port_group_a, port_group_b, port_a, port_b;
1554
1555 alen = get_unaligned_be32(cmd + 6);
1556 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1557 if (!arr)
1558 return DID_REQUEUE << 16;
1559 /*
1560 * EVPD page 0x88 states we have two ports, one
1561 * real and a fake port with no device connected.
1562 * So we create two port groups with one port each
1563 * and set the group with port B to unavailable.
1564 */
1565 port_a = 0x1; /* relative port A */
1566 port_b = 0x2; /* relative port B */
1567 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1568 (devip->channel & 0x7f);
1569 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1570 (devip->channel & 0x7f) + 0x80;
1571
1572 /*
1573 * The asymmetric access state is cycled according to the host_id.
1574 */
1575 n = 4;
1576 if (sdebug_vpd_use_hostno == 0) {
1577 arr[n++] = host_no % 3; /* Asymm access state */
1578 arr[n++] = 0x0F; /* claim: all states are supported */
1579 } else {
1580 arr[n++] = 0x0; /* Active/Optimized path */
1581 arr[n++] = 0x01; /* only support active/optimized paths */
1582 }
1583 put_unaligned_be16(port_group_a, arr + n);
1584 n += 2;
1585 arr[n++] = 0; /* Reserved */
1586 arr[n++] = 0; /* Status code */
1587 arr[n++] = 0; /* Vendor unique */
1588 arr[n++] = 0x1; /* One port per group */
1589 arr[n++] = 0; /* Reserved */
1590 arr[n++] = 0; /* Reserved */
1591 put_unaligned_be16(port_a, arr + n);
1592 n += 2;
1593 arr[n++] = 3; /* Port unavailable */
1594 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1595 put_unaligned_be16(port_group_b, arr + n);
1596 n += 2;
1597 arr[n++] = 0; /* Reserved */
1598 arr[n++] = 0; /* Status code */
1599 arr[n++] = 0; /* Vendor unique */
1600 arr[n++] = 0x1; /* One port per group */
1601 arr[n++] = 0; /* Reserved */
1602 arr[n++] = 0; /* Reserved */
1603 put_unaligned_be16(port_b, arr + n);
1604 n += 2;
1605
1606 rlen = n - 4;
1607 put_unaligned_be32(rlen, arr + 0);
1608
1609 /*
1610 * Return the smallest value of either
1611 * - The allocated length
1612 * - The constructed command length
1613 * - The maximum array size
1614 */
1615 rlen = min(alen, n);
1616 ret = fill_from_dev_buffer(scp, arr,
1617 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1618 kfree(arr);
1619 return ret;
1620 }
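/*
 * Editor's note (worked example, not part of the driver): with one
 * relative port per group, each descriptor above is 8 + 4 = 12 bytes,
 * so n ends at 4 + 2 * 12 = 28 and the RETURN DATA LENGTH written to
 * offset 0 is n - 4 = 24.
 */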
1621
1622 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1623 struct sdebug_dev_info *devip)
1624 {
1625 bool rctd;
1626 u8 reporting_opts, req_opcode, sdeb_i, supp;
1627 u16 req_sa, u;
1628 u32 alloc_len, a_len;
1629 int k, offset, len, errsts, count, bump, na;
1630 const struct opcode_info_t *oip;
1631 const struct opcode_info_t *r_oip;
1632 u8 *arr;
1633 u8 *cmd = scp->cmnd;
1634
1635 rctd = !!(cmd[2] & 0x80);
1636 reporting_opts = cmd[2] & 0x7;
1637 req_opcode = cmd[3];
1638 req_sa = get_unaligned_be16(cmd + 4);
1639 alloc_len = get_unaligned_be32(cmd + 6);
1640 if (alloc_len < 4 || alloc_len > 0xffff) {
1641 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1642 return check_condition_result;
1643 }
1644 if (alloc_len > 8192)
1645 a_len = 8192;
1646 else
1647 a_len = alloc_len;
1648 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1649 if (NULL == arr) {
1650 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1651 INSUFF_RES_ASCQ);
1652 return check_condition_result;
1653 }
1654 switch (reporting_opts) {
1655 case 0: /* all commands */
1656 /* count number of commands */
1657 for (count = 0, oip = opcode_info_arr;
1658 oip->num_attached != 0xff; ++oip) {
1659 if (F_INV_OP & oip->flags)
1660 continue;
1661 count += (oip->num_attached + 1);
1662 }
1663 bump = rctd ? 20 : 8;
1664 put_unaligned_be32(count * bump, arr);
1665 for (offset = 4, oip = opcode_info_arr;
1666 oip->num_attached != 0xff && offset < a_len; ++oip) {
1667 if (F_INV_OP & oip->flags)
1668 continue;
1669 na = oip->num_attached;
1670 arr[offset] = oip->opcode;
1671 put_unaligned_be16(oip->sa, arr + offset + 2);
1672 if (rctd)
1673 arr[offset + 5] |= 0x2;
1674 if (FF_SA & oip->flags)
1675 arr[offset + 5] |= 0x1;
1676 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1677 if (rctd)
1678 put_unaligned_be16(0xa, arr + offset + 8);
1679 r_oip = oip;
1680 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1681 if (F_INV_OP & oip->flags)
1682 continue;
1683 offset += bump;
1684 arr[offset] = oip->opcode;
1685 put_unaligned_be16(oip->sa, arr + offset + 2);
1686 if (rctd)
1687 arr[offset + 5] |= 0x2;
1688 if (FF_SA & oip->flags)
1689 arr[offset + 5] |= 0x1;
1690 put_unaligned_be16(oip->len_mask[0],
1691 arr + offset + 6);
1692 if (rctd)
1693 put_unaligned_be16(0xa,
1694 arr + offset + 8);
1695 }
1696 oip = r_oip;
1697 offset += bump;
1698 }
1699 break;
1700 case 1: /* one command: opcode only */
1701 case 2: /* one command: opcode plus service action */
1702 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1703 sdeb_i = opcode_ind_arr[req_opcode];
1704 oip = &opcode_info_arr[sdeb_i];
1705 if (F_INV_OP & oip->flags) {
1706 supp = 1;
1707 offset = 4;
1708 } else {
1709 if (1 == reporting_opts) {
1710 if (FF_SA & oip->flags) {
1711 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1712 2, 2);
1713 kfree(arr);
1714 return check_condition_result;
1715 }
1716 req_sa = 0;
1717 } else if (2 == reporting_opts &&
1718 0 == (FF_SA & oip->flags)) {
1719 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
1720 kfree(arr);
1721 return check_condition_result;
1722 }
1723 if (0 == (FF_SA & oip->flags) &&
1724 req_opcode == oip->opcode)
1725 supp = 3;
1726 else if (0 == (FF_SA & oip->flags)) {
1727 na = oip->num_attached;
1728 for (k = 0, oip = oip->arrp; k < na;
1729 ++k, ++oip) {
1730 if (req_opcode == oip->opcode)
1731 break;
1732 }
1733 supp = (k >= na) ? 1 : 3;
1734 } else if (req_sa != oip->sa) {
1735 na = oip->num_attached;
1736 for (k = 0, oip = oip->arrp; k < na;
1737 ++k, ++oip) {
1738 if (req_sa == oip->sa)
1739 break;
1740 }
1741 supp = (k >= na) ? 1 : 3;
1742 } else
1743 supp = 3;
1744 if (3 == supp) {
1745 u = oip->len_mask[0];
1746 put_unaligned_be16(u, arr + 2);
1747 arr[4] = oip->opcode;
1748 for (k = 1; k < u; ++k)
1749 arr[4 + k] = (k < 16) ?
1750 oip->len_mask[k] : 0xff;
1751 offset = 4 + u;
1752 } else
1753 offset = 4;
1754 }
1755 arr[1] = (rctd ? 0x80 : 0) | supp;
1756 if (rctd) {
1757 put_unaligned_be16(0xa, arr + offset);
1758 offset += 12;
1759 }
1760 break;
1761 default:
1762 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1763 kfree(arr);
1764 return check_condition_result;
1765 }
1766 offset = (offset < a_len) ? offset : a_len;
1767 len = (offset < alloc_len) ? offset : alloc_len;
1768 errsts = fill_from_dev_buffer(scp, arr, len);
1769 kfree(arr);
1770 return errsts;
1771 }
1772
1773 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1774 struct sdebug_dev_info *devip)
1775 {
1776 bool repd;
1777 u32 alloc_len, len;
1778 u8 arr[16];
1779 u8 *cmd = scp->cmnd;
1780
1781 memset(arr, 0, sizeof(arr));
1782 repd = !!(cmd[2] & 0x80);
1783 alloc_len = get_unaligned_be32(cmd + 6);
1784 if (alloc_len < 4) {
1785 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1786 return check_condition_result;
1787 }
1788 arr[0] = 0xc8; /* ATS | ATSS | LURS */
1789 arr[1] = 0x1; /* ITNRS */
1790 if (repd) {
1791 arr[3] = 0xc;
1792 len = 16;
1793 } else
1794 len = 4;
1795
1796 len = (len < alloc_len) ? len : alloc_len;
1797 return fill_from_dev_buffer(scp, arr, len);
1798 }
1799
1800 /* <<Following mode page info copied from ST318451LW>> */
1801
1802 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1803 { /* Read-Write Error Recovery page for mode_sense */
1804 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1805 5, 0, 0xff, 0xff};
1806
1807 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1808 if (1 == pcontrol)
1809 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1810 return sizeof(err_recov_pg);
1811 }
1812
1813 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1814 { /* Disconnect-Reconnect page for mode_sense */
1815 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1816 0, 0, 0, 0, 0, 0, 0, 0};
1817
1818 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1819 if (1 == pcontrol)
1820 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1821 return sizeof(disconnect_pg);
1822 }
1823
1824 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1825 { /* Format device page for mode_sense */
1826 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1827 0, 0, 0, 0, 0, 0, 0, 0,
1828 0, 0, 0, 0, 0x40, 0, 0, 0};
1829
1830 memcpy(p, format_pg, sizeof(format_pg));
1831 put_unaligned_be16(sdebug_sectors_per, p + 10);
1832 put_unaligned_be16(sdebug_sector_size, p + 12);
1833 if (sdebug_removable)
1834 p[20] |= 0x20; /* should agree with INQUIRY */
1835 if (1 == pcontrol)
1836 memset(p + 2, 0, sizeof(format_pg) - 2);
1837 return sizeof(format_pg);
1838 }
1839
1840 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1841 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
1842 0, 0, 0, 0};
1843
1844 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1845 { /* Caching page for mode_sense */
1846 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1847 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1848 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1849 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1850
1851 if (SDEBUG_OPT_N_WCE & sdebug_opts)
1852 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
1853 memcpy(p, caching_pg, sizeof(caching_pg));
1854 if (1 == pcontrol)
1855 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1856 else if (2 == pcontrol)
1857 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1858 return sizeof(caching_pg);
1859 }
1860
1861 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1862 0, 0, 0x2, 0x4b};
1863
1864 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1865 { /* Control mode page for mode_sense */
1866 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1867 0, 0, 0, 0};
1868 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1869 0, 0, 0x2, 0x4b};
1870
1871 if (sdebug_dsense)
1872 ctrl_m_pg[2] |= 0x4;
1873 else
1874 ctrl_m_pg[2] &= ~0x4;
1875
1876 if (sdebug_ato)
1877 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1878
1879 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1880 if (1 == pcontrol)
1881 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1882 else if (2 == pcontrol)
1883 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1884 return sizeof(ctrl_m_pg);
1885 }
1886
1887
1888 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1889 { /* Informational Exceptions control mode page for mode_sense */
1890 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1891 0, 0, 0x0, 0x0};
1892 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1893 0, 0, 0x0, 0x0};
1894
1895 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1896 if (1 == pcontrol)
1897 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1898 else if (2 == pcontrol)
1899 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1900 return sizeof(iec_m_pg);
1901 }
1902
1903 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1904 { /* SAS SSP mode page - short format for mode_sense */
1905 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1906 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1907
1908 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1909 if (1 == pcontrol)
1910 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1911 return sizeof(sas_sf_m_pg);
1912 }
1913
1914
1915 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1916 int target_dev_id)
1917 { /* SAS phy control and discover mode page for mode_sense */
1918 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1919 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1920 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
1921 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
1922 0x2, 0, 0, 0, 0, 0, 0, 0,
1923 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1924 0, 0, 0, 0, 0, 0, 0, 0,
1925 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1926 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
1927 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
1928 0x3, 0, 0, 0, 0, 0, 0, 0,
1929 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1930 0, 0, 0, 0, 0, 0, 0, 0,
1931 };
1932 int port_a, port_b;
1933
1934 put_unaligned_be64(naa5_comp_a, sas_pcd_m_pg + 16);
1935 put_unaligned_be64(naa5_comp_c + 1, sas_pcd_m_pg + 24);
1936 put_unaligned_be64(naa5_comp_a, sas_pcd_m_pg + 64);
1937 put_unaligned_be64(naa5_comp_c + 1, sas_pcd_m_pg + 72);
1938 port_a = target_dev_id + 1;
1939 port_b = port_a + 1;
1940 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1941 put_unaligned_be32(port_a, p + 20);
1942 put_unaligned_be32(port_b, p + 48 + 20);
1943 if (1 == pcontrol)
1944 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1945 return sizeof(sas_pcd_m_pg);
1946 }
1947
1948 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1949 { /* SAS SSP shared protocol specific port mode subpage */
1950 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1951 0, 0, 0, 0, 0, 0, 0, 0,
1952 };
1953
1954 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1955 if (1 == pcontrol)
1956 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1957 return sizeof(sas_sha_m_pg);
1958 }
1959
1960 #define SDEBUG_MAX_MSENSE_SZ 256
1961
1962 static int resp_mode_sense(struct scsi_cmnd *scp,
1963 struct sdebug_dev_info *devip)
1964 {
1965 int pcontrol, pcode, subpcode, bd_len;
1966 unsigned char dev_spec;
1967 int alloc_len, offset, len, target_dev_id;
1968 int target = scp->device->id;
1969 unsigned char * ap;
1970 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1971 unsigned char *cmd = scp->cmnd;
1972 bool dbd, llbaa, msense_6, is_disk, bad_pcode;
1973
1974 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
1975 pcontrol = (cmd[2] & 0xc0) >> 6;
1976 pcode = cmd[2] & 0x3f;
1977 subpcode = cmd[3];
1978 msense_6 = (MODE_SENSE == cmd[0]);
1979 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
1980 is_disk = (sdebug_ptype == TYPE_DISK);
1981 if (is_disk && !dbd)
1982 bd_len = llbaa ? 16 : 8;
1983 else
1984 bd_len = 0;
1985 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
1986 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1987 if (0x3 == pcontrol) { /* Saving values not supported */
1988 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1989 return check_condition_result;
1990 }
1991 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1992 (devip->target * 1000) - 3;
1993 /* for disks set DPOFUA bit and clear write protect (WP) bit */
1994 if (is_disk)
1995 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
1996 else
1997 dev_spec = 0x0;
1998 if (msense_6) {
1999 arr[2] = dev_spec;
2000 arr[3] = bd_len;
2001 offset = 4;
2002 } else {
2003 arr[3] = dev_spec;
2004 if (16 == bd_len)
2005 arr[4] = 0x1; /* set LONGLBA bit */
2006 arr[7] = bd_len; /* assume 255 or less */
2007 offset = 8;
2008 }
2009 ap = arr + offset;
2010 if ((bd_len > 0) && (!sdebug_capacity))
2011 sdebug_capacity = get_sdebug_capacity();
2012
2013 if (8 == bd_len) {
2014 if (sdebug_capacity > 0xfffffffe)
2015 put_unaligned_be32(0xffffffff, ap + 0);
2016 else
2017 put_unaligned_be32(sdebug_capacity, ap + 0);
2018 put_unaligned_be16(sdebug_sector_size, ap + 6);
2019 offset += bd_len;
2020 ap = arr + offset;
2021 } else if (16 == bd_len) {
2022 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2023 put_unaligned_be32(sdebug_sector_size, ap + 12);
2024 offset += bd_len;
2025 ap = arr + offset;
2026 }
2027
2028 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2029 /* TODO: Control Extension page */
2030 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2031 return check_condition_result;
2032 }
2033 bad_pcode = false;
2034
2035 switch (pcode) {
2036 case 0x1: /* Read-Write error recovery page, direct access */
2037 len = resp_err_recov_pg(ap, pcontrol, target);
2038 offset += len;
2039 break;
2040 case 0x2: /* Disconnect-Reconnect page, all devices */
2041 len = resp_disconnect_pg(ap, pcontrol, target);
2042 offset += len;
2043 break;
2044 case 0x3: /* Format device page, direct access */
2045 if (is_disk) {
2046 len = resp_format_pg(ap, pcontrol, target);
2047 offset += len;
2048 } else
2049 bad_pcode = true;
2050 break;
2051 case 0x8: /* Caching page, direct access */
2052 if (is_disk) {
2053 len = resp_caching_pg(ap, pcontrol, target);
2054 offset += len;
2055 } else
2056 bad_pcode = true;
2057 break;
2058 case 0xa: /* Control Mode page, all devices */
2059 len = resp_ctrl_m_pg(ap, pcontrol, target);
2060 offset += len;
2061 break;
2062 case 0x19: /* if spc==1 then sas phy, control+discover */
2063 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2064 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2065 return check_condition_result;
2066 }
2067 len = 0;
2068 if ((0x0 == subpcode) || (0xff == subpcode))
2069 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2070 if ((0x1 == subpcode) || (0xff == subpcode))
2071 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2072 target_dev_id);
2073 if ((0x2 == subpcode) || (0xff == subpcode))
2074 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2075 offset += len;
2076 break;
2077 case 0x1c: /* Informational Exceptions Mode page, all devices */
2078 len = resp_iec_m_pg(ap, pcontrol, target);
2079 offset += len;
2080 break;
2081 case 0x3f: /* Read all Mode pages */
2082 if ((0 == subpcode) || (0xff == subpcode)) {
2083 len = resp_err_recov_pg(ap, pcontrol, target);
2084 len += resp_disconnect_pg(ap + len, pcontrol, target);
2085 if (is_disk) {
2086 len += resp_format_pg(ap + len, pcontrol,
2087 target);
2088 len += resp_caching_pg(ap + len, pcontrol,
2089 target);
2090 }
2091 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2092 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2093 if (0xff == subpcode) {
2094 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2095 target, target_dev_id);
2096 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2097 }
2098 len += resp_iec_m_pg(ap + len, pcontrol, target);
2099 offset += len;
2100 } else {
2101 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2102 return check_condition_result;
2103 }
2104 break;
2105 default:
2106 bad_pcode = true;
2107 break;
2108 }
2109 if (bad_pcode) {
2110 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2111 return check_condition_result;
2112 }
2113 if (msense_6)
2114 arr[0] = offset - 1;
2115 else
2116 put_unaligned_be16((offset - 2), arr + 0);
2117 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2118 }
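/*
 * Editor's note (worked example, not part of the driver): a MODE
 * SENSE(6) for the caching page (0x8) on a disk with DBD=0 yields a
 * 4-byte header + 8-byte block descriptor + 20-byte page = 32 bytes,
 * so the MODE DATA LENGTH byte set above is 32 - 1 = 31.
 */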
2119
2120 #define SDEBUG_MAX_MSELECT_SZ 512
2121
2122 static int resp_mode_select(struct scsi_cmnd *scp,
2123 struct sdebug_dev_info *devip)
2124 {
2125 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2126 int param_len, res, mpage;
2127 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2128 unsigned char *cmd = scp->cmnd;
2129 int mselect6 = (MODE_SELECT == cmd[0]);
2130
2131 memset(arr, 0, sizeof(arr));
2132 pf = cmd[1] & 0x10;
2133 sp = cmd[1] & 0x1;
2134 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2135 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2136 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2137 return check_condition_result;
2138 }
2139 res = fetch_to_dev_buffer(scp, arr, param_len);
2140 if (-1 == res)
2141 return DID_ERROR << 16;
2142 else if (sdebug_verbose && (res < param_len))
2143 sdev_printk(KERN_INFO, scp->device,
2144 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2145 __func__, param_len, res);
2146 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2147 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2148 if (md_len > 2) { /* MODE DATA LENGTH is reserved (zero) in MODE SELECT */
2149 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2150 return check_condition_result;
2151 }
2152 off = bd_len + (mselect6 ? 4 : 8);
2153 mpage = arr[off] & 0x3f;
2154 ps = !!(arr[off] & 0x80);
2155 if (ps) {
2156 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2157 return check_condition_result;
2158 }
2159 spf = !!(arr[off] & 0x40);
2160 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2161 (arr[off + 1] + 2);
2162 if ((pg_len + off) > param_len) {
2163 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2164 PARAMETER_LIST_LENGTH_ERR, 0);
2165 return check_condition_result;
2166 }
2167 switch (mpage) {
2168 case 0x8: /* Caching Mode page */
2169 if (caching_pg[1] == arr[off + 1]) {
2170 memcpy(caching_pg + 2, arr + off + 2,
2171 sizeof(caching_pg) - 2);
2172 goto set_mode_changed_ua;
2173 }
2174 break;
2175 case 0xa: /* Control Mode page */
2176 if (ctrl_m_pg[1] == arr[off + 1]) {
2177 memcpy(ctrl_m_pg + 2, arr + off + 2,
2178 sizeof(ctrl_m_pg) - 2);
2179 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2180 goto set_mode_changed_ua;
2181 }
2182 break;
2183 case 0x1c: /* Informational Exceptions Mode page */
2184 if (iec_m_pg[1] == arr[off + 1]) {
2185 memcpy(iec_m_pg + 2, arr + off + 2,
2186 sizeof(iec_m_pg) - 2);
2187 goto set_mode_changed_ua;
2188 }
2189 break;
2190 default:
2191 break;
2192 }
2193 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2194 return check_condition_result;
2195 set_mode_changed_ua:
2196 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2197 return 0;
2198 }
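/*
 * Editor's note: a sketch of a MODE SELECT(6) parameter list this
 * handler would accept for the control mode page (illustrative bytes,
 * not part of the driver). Sent with PF=1 and SP=0, it sets D_SENSE,
 * which the case 0xa branch above mirrors into sdebug_dsense.
 */
#if 0
	u8 param_list[4 + 12] = {
		0, 0, 0, 0,		/* header: md_len and bd_len zero */
		0xa, 10,		/* page code 0xa, page length 10 */
		0x2 | 0x4, 0,		/* GLTSD kept, D_SENSE set */
		0, 0, 0, 0, 0, 0, 0x2, 0x4b,
	};
#endif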
2199
2200 static int resp_temp_l_pg(unsigned char * arr)
2201 {
2202 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2203 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2204 };
2205
2206 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2207 return sizeof(temp_l_pg);
2208 }
2209
2210 static int resp_ie_l_pg(unsigned char * arr)
2211 {
2212 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2213 };
2214
2215 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2216 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2217 arr[4] = THRESHOLD_EXCEEDED;
2218 arr[5] = 0xff;
2219 }
2220 return sizeof(ie_l_pg);
2221 }
2222
2223 #define SDEBUG_MAX_LSENSE_SZ 512
2224
2225 static int resp_log_sense(struct scsi_cmnd * scp,
2226 struct sdebug_dev_info * devip)
2227 {
2228 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2229 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2230 unsigned char *cmd = scp->cmnd;
2231
2232 memset(arr, 0, sizeof(arr));
2233 ppc = cmd[1] & 0x2;
2234 sp = cmd[1] & 0x1;
2235 if (ppc || sp) {
2236 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2237 return check_condition_result;
2238 }
2239 pcontrol = (cmd[2] & 0xc0) >> 6;
2240 pcode = cmd[2] & 0x3f;
2241 subpcode = cmd[3] & 0xff;
2242 alloc_len = get_unaligned_be16(cmd + 7);
2243 arr[0] = pcode;
2244 if (0 == subpcode) {
2245 switch (pcode) {
2246 case 0x0: /* Supported log pages log page */
2247 n = 4;
2248 arr[n++] = 0x0; /* this page */
2249 arr[n++] = 0xd; /* Temperature */
2250 arr[n++] = 0x2f; /* Informational exceptions */
2251 arr[3] = n - 4;
2252 break;
2253 case 0xd: /* Temperature log page */
2254 arr[3] = resp_temp_l_pg(arr + 4);
2255 break;
2256 case 0x2f: /* Informational exceptions log page */
2257 arr[3] = resp_ie_l_pg(arr + 4);
2258 break;
2259 default:
2260 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2261 return check_condition_result;
2262 }
2263 } else if (0xff == subpcode) {
2264 arr[0] |= 0x40;
2265 arr[1] = subpcode;
2266 switch (pcode) {
2267 case 0x0: /* Supported log pages and subpages log page */
2268 n = 4;
2269 arr[n++] = 0x0;
2270 arr[n++] = 0x0; /* 0,0 page */
2271 arr[n++] = 0x0;
2272 arr[n++] = 0xff; /* this page */
2273 arr[n++] = 0xd;
2274 arr[n++] = 0x0; /* Temperature */
2275 arr[n++] = 0x2f;
2276 arr[n++] = 0x0; /* Informational exceptions */
2277 arr[3] = n - 4;
2278 break;
2279 case 0xd: /* Temperature subpages */
2280 n = 4;
2281 arr[n++] = 0xd;
2282 arr[n++] = 0x0; /* Temperature */
2283 arr[3] = n - 4;
2284 break;
2285 case 0x2f: /* Informational exceptions subpages */
2286 n = 4;
2287 arr[n++] = 0x2f;
2288 arr[n++] = 0x0; /* Informational exceptions */
2289 arr[3] = n - 4;
2290 break;
2291 default:
2292 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2293 return check_condition_result;
2294 }
2295 } else {
2296 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2297 return check_condition_result;
2298 }
2299 len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2300 return fill_from_dev_buffer(scp, arr,
2301 min(len, SDEBUG_MAX_LSENSE_SZ));
2302 }
2303
2304 static int check_device_access_params(struct scsi_cmnd *scp,
2305 unsigned long long lba, unsigned int num)
2306 {
2307 if (lba + num > sdebug_capacity) {
2308 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2309 return check_condition_result;
2310 }
2311 /* transfer length excessive (tie in to block limits VPD page) */
2312 if (num > sdebug_store_sectors) {
2313 /* needs work to find which cdb byte 'num' comes from */
2314 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2315 return check_condition_result;
2316 }
2317 return 0;
2318 }
2319
2320 /* Returns number of bytes copied or -1 if error. */
2321 static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
2322 bool do_write)
2323 {
2324 int ret;
2325 u64 block, rest = 0;
2326 struct scsi_data_buffer *sdb;
2327 enum dma_data_direction dir;
2328
2329 if (do_write) {
2330 sdb = scsi_out(scmd);
2331 dir = DMA_TO_DEVICE;
2332 } else {
2333 sdb = scsi_in(scmd);
2334 dir = DMA_FROM_DEVICE;
2335 }
2336
2337 if (!sdb->length)
2338 return 0;
2339 if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2340 return -1;
2341
2342 block = do_div(lba, sdebug_store_sectors);
2343 if (block + num > sdebug_store_sectors)
2344 rest = block + num - sdebug_store_sectors;
2345
2346 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2347 fake_storep + (block * sdebug_sector_size),
2348 (num - rest) * sdebug_sector_size, 0, do_write);
2349 if (ret != (num - rest) * sdebug_sector_size)
2350 return ret;
2351
2352 if (rest) {
2353 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2354 fake_storep, rest * sdebug_sector_size,
2355 (num - rest) * sdebug_sector_size, do_write);
2356 }
2357
2358 return ret;
2359 }
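/*
 * Editor's note (illustrative numbers, not from the driver): do_div()
 * divides lba in place and returns the remainder, so the fake store is
 * addressed modulo its size. With sdebug_store_sectors=1000, lba=1998
 * and num=5 give block=998 and rest=3: two sectors are copied from the
 * top of the store and the remaining three from its start.
 */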
2360
2361 /* If fake_store(lba,num) compares equal to arr(num), then copy the second
2362 * (write) half of arr into fake_store(lba,num) and return true. If the
2363 * comparison fails then return false. */
2364 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2365 {
2366 bool res;
2367 u64 block, rest = 0;
2368 u32 store_blks = sdebug_store_sectors;
2369 u32 lb_size = sdebug_sector_size;
2370
2371 block = do_div(lba, store_blks);
2372 if (block + num > store_blks)
2373 rest = block + num - store_blks;
2374
2375 res = !memcmp(fake_storep + (block * lb_size), arr,
2376 (num - rest) * lb_size);
2377 if (!res)
2378 return res;
2379 if (rest)
2380 res = !memcmp(fake_storep, arr + ((num - rest) * lb_size),
2381 rest * lb_size);
2382 if (!res)
2383 return res;
2384 arr += num * lb_size;
2385 memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2386 if (rest)
2387 memcpy(fake_storep, arr + ((num - rest) * lb_size),
2388 rest * lb_size);
2389 return res;
2390 }
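/*
 * Editor's note: COMPARE AND WRITE carries 2 * num blocks of data-out;
 * resp_comp_write() below fetches them into a scratch buffer before
 * calling this helper. Assumed layout, for illustration:
 *
 *   arr[0 .. num*lb_size - 1]              bytes that must match the store
 *   arr[num*lb_size .. 2*num*lb_size - 1]  bytes written on a match
 */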
2391
2392 static __be16 dif_compute_csum(const void *buf, int len)
2393 {
2394 __be16 csum;
2395
2396 if (sdebug_guard)
2397 csum = (__force __be16)ip_compute_csum(buf, len);
2398 else
2399 csum = cpu_to_be16(crc_t10dif(buf, len));
2400
2401 return csum;
2402 }
2403
2404 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
2405 sector_t sector, u32 ei_lba)
2406 {
2407 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
2408
2409 if (sdt->guard_tag != csum) {
2410 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2411 (unsigned long)sector,
2412 be16_to_cpu(sdt->guard_tag),
2413 be16_to_cpu(csum));
2414 return 0x01;
2415 }
2416 if (sdebug_dif == SD_DIF_TYPE1_PROTECTION &&
2417 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2418 pr_err("REF check failed on sector %lu\n",
2419 (unsigned long)sector);
2420 return 0x03;
2421 }
2422 if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
2423 be32_to_cpu(sdt->ref_tag) != ei_lba) {
2424 pr_err("REF check failed on sector %lu\n",
2425 (unsigned long)sector);
2426 return 0x03;
2427 }
2428 return 0;
2429 }
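/*
 * Editor's note (inferred from the callers, for readers): the 0x01 and
 * 0x03 results above are passed as the ASCQ with ASC 0x10 to
 * mk_sense_buffer(), i.e. "logical block guard check failed" and
 * "logical block reference tag check failed" respectively.
 */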
2430
2431 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2432 unsigned int sectors, bool read)
2433 {
2434 size_t resid;
2435 void *paddr;
2436 const void *dif_store_end = dif_storep + sdebug_store_sectors;
2437 struct sg_mapping_iter miter;
2438
2439 /* Bytes of protection data to copy into sgl */
2440 resid = sectors * sizeof(*dif_storep);
2441
2442 sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2443 scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2444 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2445
2446 while (sg_miter_next(&miter) && resid > 0) {
2447 size_t len = min(miter.length, resid);
2448 void *start = dif_store(sector);
2449 size_t rest = 0;
2450
2451 if (dif_store_end < start + len)
2452 rest = start + len - dif_store_end;
2453
2454 paddr = miter.addr;
2455
2456 if (read)
2457 memcpy(paddr, start, len - rest);
2458 else
2459 memcpy(start, paddr, len - rest);
2460
2461 if (rest) {
2462 if (read)
2463 memcpy(paddr + len - rest, dif_storep, rest);
2464 else
2465 memcpy(dif_storep, paddr + len - rest, rest);
2466 }
2467
2468 sector += len / sizeof(*dif_storep);
2469 resid -= len;
2470 }
2471 sg_miter_stop(&miter);
2472 }
2473
2474 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2475 unsigned int sectors, u32 ei_lba)
2476 {
2477 unsigned int i;
2478 struct sd_dif_tuple *sdt;
2479 sector_t sector;
2480
2481 for (i = 0; i < sectors; i++, ei_lba++) {
2482 int ret;
2483
2484 sector = start_sec + i;
2485 sdt = dif_store(sector);
2486
2487 if (sdt->app_tag == cpu_to_be16(0xffff))
2488 continue;
2489
2490 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2491 if (ret) {
2492 dif_errors++;
2493 return ret;
2494 }
2495 }
2496
2497 dif_copy_prot(SCpnt, start_sec, sectors, true);
2498 dix_reads++;
2499
2500 return 0;
2501 }
2502
2503 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2504 {
2505 u8 *cmd = scp->cmnd;
2506 struct sdebug_queued_cmd *sqcp;
2507 u64 lba;
2508 u32 num;
2509 u32 ei_lba;
2510 unsigned long iflags;
2511 int ret;
2512 bool check_prot;
2513
2514 switch (cmd[0]) {
2515 case READ_16:
2516 ei_lba = 0;
2517 lba = get_unaligned_be64(cmd + 2);
2518 num = get_unaligned_be32(cmd + 10);
2519 check_prot = true;
2520 break;
2521 case READ_10:
2522 ei_lba = 0;
2523 lba = get_unaligned_be32(cmd + 2);
2524 num = get_unaligned_be16(cmd + 7);
2525 check_prot = true;
2526 break;
2527 case READ_6:
2528 ei_lba = 0;
2529 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2530 (u32)(cmd[1] & 0x1f) << 16;
2531 num = (0 == cmd[4]) ? 256 : cmd[4];
2532 check_prot = true;
2533 break;
2534 case READ_12:
2535 ei_lba = 0;
2536 lba = get_unaligned_be32(cmd + 2);
2537 num = get_unaligned_be32(cmd + 6);
2538 check_prot = true;
2539 break;
2540 case XDWRITEREAD_10:
2541 ei_lba = 0;
2542 lba = get_unaligned_be32(cmd + 2);
2543 num = get_unaligned_be16(cmd + 7);
2544 check_prot = false;
2545 break;
2546 default: /* assume READ(32) */
2547 lba = get_unaligned_be64(cmd + 12);
2548 ei_lba = get_unaligned_be32(cmd + 20);
2549 num = get_unaligned_be32(cmd + 28);
2550 check_prot = false;
2551 break;
2552 }
2553 if (unlikely(have_dif_prot && check_prot)) {
2554 if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
2555 (cmd[1] & 0xe0)) {
2556 mk_sense_invalid_opcode(scp);
2557 return check_condition_result;
2558 }
2559 if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
2560 sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
2561 (cmd[1] & 0xe0) == 0)
2562 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2563 "to DIF device\n");
2564 }
2565 if (unlikely(sdebug_any_injecting_opt)) {
2566 sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
2567
2568 if (sqcp) {
2569 if (sqcp->inj_short)
2570 num /= 2;
2571 }
2572 } else
2573 sqcp = NULL;
2574
2575 /* inline check_device_access_params() */
2576 if (unlikely(lba + num > sdebug_capacity)) {
2577 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2578 return check_condition_result;
2579 }
2580 /* transfer length excessive (tie in to block limits VPD page) */
2581 if (unlikely(num > sdebug_store_sectors)) {
2582 /* needs work to find which cdb byte 'num' comes from */
2583 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2584 return check_condition_result;
2585 }
2586
2587 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
2588 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2589 ((lba + num) > OPT_MEDIUM_ERR_ADDR))) {
2590 /* claim unrecoverable read error */
2591 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2592 /* set info field and valid bit for fixed descriptor */
2593 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2594 scp->sense_buffer[0] |= 0x80; /* Valid bit */
2595 ret = (lba < OPT_MEDIUM_ERR_ADDR)
2596 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2597 put_unaligned_be32(ret, scp->sense_buffer + 3);
2598 }
2599 scsi_set_resid(scp, scsi_bufflen(scp));
2600 return check_condition_result;
2601 }
2602
2603 read_lock_irqsave(&atomic_rw, iflags);
2604
2605 /* DIX + T10 DIF */
2606 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
2607 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2608
2609 if (prot_ret) {
2610 read_unlock_irqrestore(&atomic_rw, iflags);
2611 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2612 return illegal_condition_result;
2613 }
2614 }
2615
2616 ret = do_device_access(scp, lba, num, false);
2617 read_unlock_irqrestore(&atomic_rw, iflags);
2618 if (unlikely(ret == -1))
2619 return DID_ERROR << 16;
2620
2621 scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
2622
2623 if (unlikely(sqcp)) {
2624 if (sqcp->inj_recovered) {
2625 mk_sense_buffer(scp, RECOVERED_ERROR,
2626 THRESHOLD_EXCEEDED, 0);
2627 return check_condition_result;
2628 } else if (sqcp->inj_transport) {
2629 mk_sense_buffer(scp, ABORTED_COMMAND,
2630 TRANSPORT_PROBLEM, ACK_NAK_TO);
2631 return check_condition_result;
2632 } else if (sqcp->inj_dif) {
2633 /* Logical block guard check failed */
2634 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2635 return illegal_condition_result;
2636 } else if (sqcp->inj_dix) {
2637 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2638 return illegal_condition_result;
2639 }
2640 }
2641 return 0;
2642 }
2643
2644 static void dump_sector(unsigned char *buf, int len)
2645 {
2646 int i, j, n;
2647
2648 pr_err(">>> Sector Dump <<<\n");
2649 for (i = 0 ; i < len ; i += 16) {
2650 char b[128];
2651
2652 for (j = 0, n = 0; j < 16; j++) {
2653 unsigned char c = buf[i+j];
2654
2655 if (c >= 0x20 && c < 0x7e)
2656 n += scnprintf(b + n, sizeof(b) - n,
2657 " %c ", c);
2658 else
2659 n += scnprintf(b + n, sizeof(b) - n,
2660 "%02x ", c);
2661 }
2662 pr_err("%04d: %s\n", i, b);
2663 }
2664 }
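/*
 * Editor's note (illustrative output, not from a real run): each dumped
 * line covers 16 bytes, printable characters as " c " and others as
 * two-digit hex, e.g.:
 *   0000:  d  e  b  u  g 00 00 00 ff ff ff ff 00 00 00 00
 */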
2665
2666 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2667 unsigned int sectors, u32 ei_lba)
2668 {
2669 int ret;
2670 struct sd_dif_tuple *sdt;
2671 void *daddr;
2672 sector_t sector = start_sec;
2673 int ppage_offset;
2674 int dpage_offset;
2675 struct sg_mapping_iter diter;
2676 struct sg_mapping_iter piter;
2677
2678 BUG_ON(scsi_sg_count(SCpnt) == 0);
2679 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2680
2681 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2682 scsi_prot_sg_count(SCpnt),
2683 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2684 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2685 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2686
2687 /* For each protection page */
2688 while (sg_miter_next(&piter)) {
2689 dpage_offset = 0;
2690 if (WARN_ON(!sg_miter_next(&diter))) {
2691 ret = 0x01;
2692 goto out;
2693 }
2694
2695 for (ppage_offset = 0; ppage_offset < piter.length;
2696 ppage_offset += sizeof(struct sd_dif_tuple)) {
2697 /* If we're at the end of the current
2698 * data page advance to the next one
2699 */
2700 if (dpage_offset >= diter.length) {
2701 if (WARN_ON(!sg_miter_next(&diter))) {
2702 ret = 0x01;
2703 goto out;
2704 }
2705 dpage_offset = 0;
2706 }
2707
2708 sdt = piter.addr + ppage_offset;
2709 daddr = diter.addr + dpage_offset;
2710
2711 ret = dif_verify(sdt, daddr, sector, ei_lba);
2712 if (ret) {
2713 dump_sector(daddr, sdebug_sector_size);
2714 goto out;
2715 }
2716
2717 sector++;
2718 ei_lba++;
2719 dpage_offset += sdebug_sector_size;
2720 }
2721 diter.consumed = dpage_offset;
2722 sg_miter_stop(&diter);
2723 }
2724 sg_miter_stop(&piter);
2725
2726 dif_copy_prot(SCpnt, start_sec, sectors, false);
2727 dix_writes++;
2728
2729 return 0;
2730
2731 out:
2732 dif_errors++;
2733 sg_miter_stop(&diter);
2734 sg_miter_stop(&piter);
2735 return ret;
2736 }
2737
2738 static unsigned long lba_to_map_index(sector_t lba)
2739 {
2740 if (sdebug_unmap_alignment)
2741 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2742 sector_div(lba, sdebug_unmap_granularity);
2743 return lba;
2744 }
2745
2746 static sector_t map_index_to_lba(unsigned long index)
2747 {
2748 sector_t lba = index * sdebug_unmap_granularity;
2749
2750 if (sdebug_unmap_alignment)
2751 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2752 return lba;
2753 }
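/*
 * Editor's note (worked example, not part of the driver): with
 * sdebug_unmap_granularity=4 and sdebug_unmap_alignment=1,
 * lba_to_map_index(5) = (5 + 3) / 4 = 2 and map_index_to_lba(2) =
 * 2 * 4 - 3 = 5, so the two helpers round-trip on grain boundaries.
 */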
2754
2755 static unsigned int map_state(sector_t lba, unsigned int *num)
2756 {
2757 sector_t end;
2758 unsigned int mapped;
2759 unsigned long index;
2760 unsigned long next;
2761
2762 index = lba_to_map_index(lba);
2763 mapped = test_bit(index, map_storep);
2764
2765 if (mapped)
2766 next = find_next_zero_bit(map_storep, map_size, index);
2767 else
2768 next = find_next_bit(map_storep, map_size, index);
2769
2770 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
2771 *num = end - lba;
2772 return mapped;
2773 }
2774
2775 static void map_region(sector_t lba, unsigned int len)
2776 {
2777 sector_t end = lba + len;
2778
2779 while (lba < end) {
2780 unsigned long index = lba_to_map_index(lba);
2781
2782 if (index < map_size)
2783 set_bit(index, map_storep);
2784
2785 lba = map_index_to_lba(index + 1);
2786 }
2787 }
2788
2789 static void unmap_region(sector_t lba, unsigned int len)
2790 {
2791 sector_t end = lba + len;
2792
2793 while (lba < end) {
2794 unsigned long index = lba_to_map_index(lba);
2795
2796 if (lba == map_index_to_lba(index) &&
2797 lba + sdebug_unmap_granularity <= end &&
2798 index < map_size) {
2799 clear_bit(index, map_storep);
2800 if (sdebug_lbprz) { /* LBPRZ=1: fill with zeros; LBPRZ=2: fill with 0xff */
2801 memset(fake_storep +
2802 lba * sdebug_sector_size,
2803 (sdebug_lbprz & 1) ? 0 : 0xff,
2804 sdebug_sector_size *
2805 sdebug_unmap_granularity);
2806 }
2807 if (dif_storep) {
2808 memset(dif_storep + lba, 0xff,
2809 sizeof(*dif_storep) *
2810 sdebug_unmap_granularity);
2811 }
2812 }
2813 lba = map_index_to_lba(index + 1);
2814 }
2815 }
2816
2817 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2818 {
2819 u8 *cmd = scp->cmnd;
2820 u64 lba;
2821 u32 num;
2822 u32 ei_lba;
2823 unsigned long iflags;
2824 int ret;
2825 bool check_prot;
2826
2827 switch (cmd[0]) {
2828 case WRITE_16:
2829 ei_lba = 0;
2830 lba = get_unaligned_be64(cmd + 2);
2831 num = get_unaligned_be32(cmd + 10);
2832 check_prot = true;
2833 break;
2834 case WRITE_10:
2835 ei_lba = 0;
2836 lba = get_unaligned_be32(cmd + 2);
2837 num = get_unaligned_be16(cmd + 7);
2838 check_prot = true;
2839 break;
2840 case WRITE_6:
2841 ei_lba = 0;
2842 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2843 (u32)(cmd[1] & 0x1f) << 16;
2844 num = (0 == cmd[4]) ? 256 : cmd[4];
2845 check_prot = true;
2846 break;
2847 case WRITE_12:
2848 ei_lba = 0;
2849 lba = get_unaligned_be32(cmd + 2);
2850 num = get_unaligned_be32(cmd + 6);
2851 check_prot = true;
2852 break;
2853 case 0x53: /* XDWRITEREAD(10) */
2854 ei_lba = 0;
2855 lba = get_unaligned_be32(cmd + 2);
2856 num = get_unaligned_be16(cmd + 7);
2857 check_prot = false;
2858 break;
2859 default: /* assume WRITE(32) */
2860 lba = get_unaligned_be64(cmd + 12);
2861 ei_lba = get_unaligned_be32(cmd + 20);
2862 num = get_unaligned_be32(cmd + 28);
2863 check_prot = false;
2864 break;
2865 }
2866 if (unlikely(have_dif_prot && check_prot)) {
2867 if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
2868 (cmd[1] & 0xe0)) {
2869 mk_sense_invalid_opcode(scp);
2870 return check_condition_result;
2871 }
2872 if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
2873 sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
2874 (cmd[1] & 0xe0) == 0)
2875 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2876 "to DIF device\n");
2877 }
2878
2879 /* inline check_device_access_params() */
2880 if (unlikely(lba + num > sdebug_capacity)) {
2881 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2882 return check_condition_result;
2883 }
2884 /* transfer length excessive (tie in to block limits VPD page) */
2885 if (unlikely(num > sdebug_store_sectors)) {
2886 /* needs work to find which cdb byte 'num' comes from */
2887 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2888 return check_condition_result;
2889 }
2890
2891 write_lock_irqsave(&atomic_rw, iflags);
2892
2893 /* DIX + T10 DIF */
2894 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
2895 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2896
2897 if (prot_ret) {
2898 write_unlock_irqrestore(&atomic_rw, iflags);
2899 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2900 return illegal_condition_result;
2901 }
2902 }
2903
2904 ret = do_device_access(scp, lba, num, true);
2905 if (unlikely(scsi_debug_lbp()))
2906 map_region(lba, num);
2907 write_unlock_irqrestore(&atomic_rw, iflags);
2908 if (unlikely(-1 == ret))
2909 return DID_ERROR << 16;
2910 else if (unlikely(sdebug_verbose &&
2911 (ret < (num * sdebug_sector_size))))
2912 sdev_printk(KERN_INFO, scp->device,
2913 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2914 my_name, num * sdebug_sector_size, ret);
2915
2916 if (unlikely(sdebug_any_injecting_opt)) {
2917 struct sdebug_queued_cmd *sqcp =
2918 (struct sdebug_queued_cmd *)scp->host_scribble;
2919
2920 if (sqcp) {
2921 if (sqcp->inj_recovered) {
2922 mk_sense_buffer(scp, RECOVERED_ERROR,
2923 THRESHOLD_EXCEEDED, 0);
2924 return check_condition_result;
2925 } else if (sqcp->inj_dif) {
2926 /* Logical block guard check failed */
2927 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2928 return illegal_condition_result;
2929 } else if (sqcp->inj_dix) {
2930 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2931 return illegal_condition_result;
2932 }
2933 }
2934 }
2935 return 0;
2936 }
2937
2938 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
2939 u32 ei_lba, bool unmap, bool ndob)
2940 {
2941 unsigned long iflags;
2942 unsigned long long i;
2943 int ret;
2944 u64 lba_off;
2945
2946 ret = check_device_access_params(scp, lba, num);
2947 if (ret)
2948 return ret;
2949
2950 write_lock_irqsave(&atomic_rw, iflags);
2951
2952 if (unmap && scsi_debug_lbp()) {
2953 unmap_region(lba, num);
2954 goto out;
2955 }
2956
2957 lba_off = lba * sdebug_sector_size;
2958 /* if ndob then zero 1 logical block, else fetch 1 logical block */
2959 if (ndob) {
2960 memset(fake_storep + lba_off, 0, sdebug_sector_size);
2961 ret = 0;
2962 } else
2963 ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
2964 sdebug_sector_size);
2965
2966 if (-1 == ret) {
2967 write_unlock_irqrestore(&atomic_rw, iflags);
2968 return DID_ERROR << 16;
2969 } else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
2970 sdev_printk(KERN_INFO, scp->device,
2971 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2972 my_name, "write same",
2973 num * sdebug_sector_size, ret);
2974
2975 /* Copy first sector to remaining blocks */
2976 for (i = 1 ; i < num ; i++)
2977 memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
2978 fake_storep + lba_off,
2979 sdebug_sector_size);
2980
2981 if (scsi_debug_lbp())
2982 map_region(lba, num);
2983 out:
2984 write_unlock_irqrestore(&atomic_rw, iflags);
2985
2986 return 0;
2987 }
2988
2989 static int resp_write_same_10(struct scsi_cmnd *scp,
2990 struct sdebug_dev_info *devip)
2991 {
2992 u8 *cmd = scp->cmnd;
2993 u32 lba;
2994 u16 num;
2995 u32 ei_lba = 0;
2996 bool unmap = false;
2997
2998 if (cmd[1] & 0x8) {
2999 if (sdebug_lbpws10 == 0) {
3000 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3001 return check_condition_result;
3002 } else
3003 unmap = true;
3004 }
3005 lba = get_unaligned_be32(cmd + 2);
3006 num = get_unaligned_be16(cmd + 7);
3007 if (num > sdebug_write_same_length) {
3008 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3009 return check_condition_result;
3010 }
3011 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3012 }
3013
3014 static int resp_write_same_16(struct scsi_cmnd *scp,
3015 struct sdebug_dev_info *devip)
3016 {
3017 u8 *cmd = scp->cmnd;
3018 u64 lba;
3019 u32 num;
3020 u32 ei_lba = 0;
3021 bool unmap = false;
3022 bool ndob = false;
3023
3024 if (cmd[1] & 0x8) { /* UNMAP */
3025 if (sdebug_lbpws == 0) {
3026 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3027 return check_condition_result;
3028 } else
3029 unmap = true;
3030 }
3031 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3032 ndob = true;
3033 lba = get_unaligned_be64(cmd + 2);
3034 num = get_unaligned_be32(cmd + 10);
3035 if (num > sdebug_write_same_length) {
3036 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3037 return check_condition_result;
3038 }
3039 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3040 }
3041
3042 /* Note the mode field is in the same position as the (lower) service action
3043 * field. For the Report supported operation codes command, SPC-4 suggests
3044 * each mode of this command should be reported separately; left for future. */
3045 static int resp_write_buffer(struct scsi_cmnd *scp,
3046 struct sdebug_dev_info *devip)
3047 {
3048 u8 *cmd = scp->cmnd;
3049 struct scsi_device *sdp = scp->device;
3050 struct sdebug_dev_info *dp;
3051 u8 mode;
3052
3053 mode = cmd[1] & 0x1f;
3054 switch (mode) {
3055 case 0x4: /* download microcode (MC) and activate (ACT) */
3056 /* set UAs on this device only */
3057 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3058 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3059 break;
3060 case 0x5: /* download MC, save and ACT */
3061 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3062 break;
3063 case 0x6: /* download MC with offsets and ACT */
3064 /* set UAs on most devices (LUs) in this target */
3065 list_for_each_entry(dp,
3066 &devip->sdbg_host->dev_info_list,
3067 dev_list)
3068 if (dp->target == sdp->id) {
3069 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3070 if (devip != dp)
3071 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3072 dp->uas_bm);
3073 }
3074 break;
3075 case 0x7: /* download MC with offsets, save, and ACT */
3076 /* set UA on all devices (LUs) in this target */
3077 list_for_each_entry(dp,
3078 &devip->sdbg_host->dev_info_list,
3079 dev_list)
3080 if (dp->target == sdp->id)
3081 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3082 dp->uas_bm);
3083 break;
3084 default:
3085 /* do nothing for this command for other mode values */
3086 break;
3087 }
3088 return 0;
3089 }
3090
3091 static int resp_comp_write(struct scsi_cmnd *scp,
3092 struct sdebug_dev_info *devip)
3093 {
3094 u8 *cmd = scp->cmnd;
3095 u8 *arr;
3096 u8 *fake_storep_hold;
3097 u64 lba;
3098 u32 dnum;
3099 u32 lb_size = sdebug_sector_size;
3100 u8 num;
3101 unsigned long iflags;
3102 int ret;
3103 int retval = 0;
3104
3105 lba = get_unaligned_be64(cmd + 2);
3106 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3107 if (0 == num)
3108 return 0; /* degenerate case, not an error */
3109 if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
3110 (cmd[1] & 0xe0)) {
3111 mk_sense_invalid_opcode(scp);
3112 return check_condition_result;
3113 }
3114 if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
3115 sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
3116 (cmd[1] & 0xe0) == 0)
3117 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3118 "to DIF device\n");
3119
3120 /* inline check_device_access_params() */
3121 if (lba + num > sdebug_capacity) {
3122 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3123 return check_condition_result;
3124 }
3125 /* transfer length excessive (tie in to block limits VPD page) */
3126 if (num > sdebug_store_sectors) {
3127 /* needs work to find which cdb byte 'num' comes from */
3128 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3129 return check_condition_result;
3130 }
3131 dnum = 2 * num;
3132 arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3133 if (NULL == arr) {
3134 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3135 INSUFF_RES_ASCQ);
3136 return check_condition_result;
3137 }
3138
3139 write_lock_irqsave(&atomic_rw, iflags);
3140
3141 /* trick do_device_access() to fetch both compare and write buffers
3142 * from data-in into arr. Safe (atomic) since write_lock held. */
3143 fake_storep_hold = fake_storep;
3144 fake_storep = arr;
3145 ret = do_device_access(scp, 0, dnum, true);
3146 fake_storep = fake_storep_hold;
3147 if (ret == -1) {
3148 retval = DID_ERROR << 16;
3149 goto cleanup;
3150 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3151 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3152 "indicated=%u, IO sent=%d bytes\n", my_name,
3153 dnum * lb_size, ret);
3154 if (!comp_write_worker(lba, num, arr)) {
3155 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3156 retval = check_condition_result;
3157 goto cleanup;
3158 }
3159 if (scsi_debug_lbp())
3160 map_region(lba, num);
3161 cleanup:
3162 write_unlock_irqrestore(&atomic_rw, iflags);
3163 kfree(arr);
3164 return retval;
3165 }
3166
3167 struct unmap_block_desc {
3168 __be64 lba;
3169 __be32 blocks;
3170 __be32 __reserved;
3171 };
3172
3173 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3174 {
3175 unsigned char *buf;
3176 struct unmap_block_desc *desc;
3177 unsigned int i, payload_len, descriptors;
3178 int ret;
3179 unsigned long iflags;
3180
3181
3182 if (!scsi_debug_lbp())
3183 return 0; /* fib and say it's done */
3184 payload_len = get_unaligned_be16(scp->cmnd + 7);
3185 BUG_ON(scsi_bufflen(scp) != payload_len);
3186
3187 descriptors = (payload_len - 8) / 16;
3188 if (descriptors > sdebug_unmap_max_desc) {
3189 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3190 return check_condition_result;
3191 }
3192
3193 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3194 if (!buf) {
3195 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3196 INSUFF_RES_ASCQ);
3197 return check_condition_result;
3198 }
3199
3200 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3201
3202 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3203 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3204
3205 desc = (void *)&buf[8];
3206
3207 write_lock_irqsave(&atomic_rw, iflags);
3208
3209 for (i = 0 ; i < descriptors ; i++) {
3210 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3211 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3212
3213 ret = check_device_access_params(scp, lba, num);
3214 if (ret)
3215 goto out;
3216
3217 unmap_region(lba, num);
3218 }
3219
3220 ret = 0;
3221
3222 out:
3223 write_unlock_irqrestore(&atomic_rw, iflags);
3224 kfree(buf);
3225
3226 return ret;
3227 }
3228
3229 #define SDEBUG_GET_LBA_STATUS_LEN 32
3230
3231 static int resp_get_lba_status(struct scsi_cmnd *scp,
3232 struct sdebug_dev_info *devip)
3233 {
3234 u8 *cmd = scp->cmnd;
3235 u64 lba;
3236 u32 alloc_len, mapped, num;
3237 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3238 int ret;
3239
3240 lba = get_unaligned_be64(cmd + 2);
3241 alloc_len = get_unaligned_be32(cmd + 10);
3242
3243 if (alloc_len < 24)
3244 return 0;
3245
3246 ret = check_device_access_params(scp, lba, 1);
3247 if (ret)
3248 return ret;
3249
3250 if (scsi_debug_lbp())
3251 mapped = map_state(lba, &num);
3252 else {
3253 mapped = 1;
3254 /* following just in case virtual_gb changed */
3255 sdebug_capacity = get_sdebug_capacity();
3256 if (sdebug_capacity - lba <= 0xffffffff)
3257 num = sdebug_capacity - lba;
3258 else
3259 num = 0xffffffff;
3260 }
3261
3262 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3263 put_unaligned_be32(20, arr); /* Parameter Data Length */
3264 put_unaligned_be64(lba, arr + 8); /* LBA */
3265 put_unaligned_be32(num, arr + 16); /* Number of blocks */
3266 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
3267
3268 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3269 }
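/*
 * Editor's note: a sketch decoding the response built above
 * (illustrative helper, not part of the driver; the name is made up).
 * A single 16-byte LBA status descriptor follows the 8-byte header.
 */
#if 0
static void sketch_decode_lba_status(const u8 *arr)
{
	u32 pd_len = get_unaligned_be32(arr + 0);	/* 20 above */
	u64 lba = get_unaligned_be64(arr + 8);
	u32 blocks = get_unaligned_be32(arr + 16);
	u8 prov_status = arr[20] & 0xf;	/* 0: mapped, 1: deallocated */

	pr_info("pd_len=%u lba=%llu blocks=%u prov_status=%u\n",
		pd_len, lba, blocks, prov_status);
}
#endif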
3270
3271 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3272 * (W-LUN), the normal Linux scanning logic does not associate it with a
3273 * device (e.g. /dev/sg7). The following magic will make that association:
3274 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3275 * where <n> is a host number. If there are multiple targets in a host then
3276 * the above will associate a W-LUN to each target. To only get a W-LUN
3277 * for target 2, then use "echo '- 2 49409' > scan".
3278 */
3279 static int resp_report_luns(struct scsi_cmnd *scp,
3280 struct sdebug_dev_info *devip)
3281 {
3282 unsigned char *cmd = scp->cmnd;
3283 unsigned int alloc_len;
3284 unsigned char select_report;
3285 u64 lun;
3286 struct scsi_lun *lun_p;
3287 u8 *arr;
3288 unsigned int lun_cnt; /* normal LUN count (max: 256) */
3289 unsigned int wlun_cnt; /* report luns W-LUN count */
3290 unsigned int tlun_cnt; /* total LUN count */
3291 unsigned int rlen; /* response length (in bytes) */
3292 int i, res;
3293
3294 clear_luns_changed_on_target(devip);
3295
3296 select_report = cmd[2];
3297 alloc_len = get_unaligned_be32(cmd + 6);
3298
3299 if (alloc_len < 4) {
3300 pr_err("alloc len too small %d\n", alloc_len);
3301 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
3302 return check_condition_result;
3303 }
3304
3305 switch (select_report) {
3306 case 0: /* all LUNs apart from W-LUNs */
3307 lun_cnt = sdebug_max_luns;
3308 wlun_cnt = 0;
3309 break;
3310 case 1: /* only W-LUNs */
3311 lun_cnt = 0;
3312 wlun_cnt = 1;
3313 break;
3314 case 2: /* all LUNs */
3315 lun_cnt = sdebug_max_luns;
3316 wlun_cnt = 1;
3317 break;
3318 case 0x10: /* only administrative LUs */
3319 case 0x11: /* see SPC-5 */
3320 case 0x12: /* only subsidiary LUs owned by referenced LU */
3321 default:
3322 pr_debug("select report invalid %d\n", select_report);
3323 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
3324 return check_condition_result;
3325 }
3326
3327 if (sdebug_no_lun_0 && (lun_cnt > 0))
3328 --lun_cnt;
3329
3330 tlun_cnt = lun_cnt + wlun_cnt;
3331
3332 rlen = (tlun_cnt * sizeof(struct scsi_lun)) + 8;
3333 arr = vmalloc(rlen);
3334 if (!arr) {
3335 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3336 INSUFF_RES_ASCQ);
3337 return check_condition_result;
3338 }
3339 memset(arr, 0, rlen);
3340 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
3341 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
3342
3343 /* luns start at byte 8 in response following the header */
3344 lun_p = (struct scsi_lun *)&arr[8];
3345
3346 /* LUNs use single level peripheral device addressing method */
3347 lun = sdebug_no_lun_0 ? 1 : 0;
3348 for (i = 0; i < lun_cnt; i++)
3349 int_to_scsilun(lun++, lun_p++);
3350
3351 if (wlun_cnt)
3352 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p++);
3353
3354 put_unaligned_be32(rlen - 8, &arr[0]);
3355
3356 res = fill_from_dev_buffer(scp, arr, rlen);
3357 vfree(arr);
3358 return res;
3359 }
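/*
 * Editor's note (worked example, not part of the driver): each LUN
 * occupies one 8-byte struct scsi_lun after the 8-byte header, so with
 * sdebug_max_luns=4 and select_report=2 the response is
 * (4 + 1) * 8 + 8 = 48 bytes and the LUN LIST LENGTH field is 40.
 */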
3360
3361 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3362 unsigned int num, struct sdebug_dev_info *devip)
3363 {
3364 int j;
3365 unsigned char *kaddr, *buf;
3366 unsigned int offset;
3367 struct scsi_data_buffer *sdb = scsi_in(scp);
3368 struct sg_mapping_iter miter;
3369
3370 /* it would be better not to use a temporary buffer here */
3371 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3372 if (!buf) {
3373 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3374 INSUFF_RES_ASCQ);
3375 return check_condition_result;
3376 }
3377
3378 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3379
3380 offset = 0;
3381 sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3382 SG_MITER_ATOMIC | SG_MITER_TO_SG);
3383
3384 while (sg_miter_next(&miter)) {
3385 kaddr = miter.addr;
3386 for (j = 0; j < miter.length; j++)
3387 *(kaddr + j) ^= *(buf + offset + j);
3388
3389 offset += miter.length;
3390 }
3391 sg_miter_stop(&miter);
3392 kfree(buf);
3393
3394 return 0;
3395 }
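/*
 * Editor's note (illustrative, not part of the driver): the loop above
 * XORs the fetched data-out bytes into the data-in buffer, so the
 * initiator gets back old_data ^ new_data for every byte of an
 * XDWRITEREAD(10), e.g. 0x5a ^ 0x3c = 0x66.
 */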
3396
3397 static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3398 struct sdebug_dev_info *devip)
3399 {
3400 u8 *cmd = scp->cmnd;
3401 u64 lba;
3402 u32 num;
3403 int errsts;
3404
3405 if (!scsi_bidi_cmnd(scp)) {
3406 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3407 INSUFF_RES_ASCQ);
3408 return check_condition_result;
3409 }
3410 errsts = resp_read_dt0(scp, devip);
3411 if (errsts)
3412 return errsts;
3413 if (!(cmd[1] & 0x4)) { /* DISABLE_WRITE is not set */
3414 errsts = resp_write_dt0(scp, devip);
3415 if (errsts)
3416 return errsts;
3417 }
3418 lba = get_unaligned_be32(cmd + 2);
3419 num = get_unaligned_be16(cmd + 7);
3420 return resp_xdwriteread(scp, lba, num, devip);
3421 }
3422
3423 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3424 {
3425 struct sdebug_queue *sqp = sdebug_q_arr;
3426
3427 if (sdebug_mq_active) {
3428 u32 tag = blk_mq_unique_tag(cmnd->request);
3429 u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3430
3431 if (unlikely(hwq >= submit_queues)) {
3432 pr_warn("Unexpected hwq=%d, apply modulo\n", hwq);
3433 hwq %= submit_queues;
3434 }
3435 pr_debug("tag=%u, hwq=%d\n", tag, hwq);
3436 return sqp + hwq;
3437 } else
3438 return sqp;
3439 }
3440
3441 /* Queued (deferred) command completions converge here. */
3442 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
3443 {
3444 int qc_idx;
3445 int retiring = 0;
3446 unsigned long iflags;
3447 struct sdebug_queue *sqp;
3448 struct sdebug_queued_cmd *sqcp;
3449 struct scsi_cmnd *scp;
3450 struct sdebug_dev_info *devip;
3451
3452 qc_idx = sd_dp->qc_idx;
3453 sqp = sdebug_q_arr + sd_dp->sqa_idx;
3454 if (sdebug_statistics) {
3455 atomic_inc(&sdebug_completions);
3456 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
3457 atomic_inc(&sdebug_miss_cpus);
3458 }
3459 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
3460 pr_err("wild qc_idx=%d\n", qc_idx);
3461 return;
3462 }
3463 spin_lock_irqsave(&sqp->qc_lock, iflags);
3464 sqcp = &sqp->qc_arr[qc_idx];
3465 scp = sqcp->a_cmnd;
3466 if (unlikely(scp == NULL)) {
3467 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3468 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
3469 sd_dp->sqa_idx, qc_idx);
3470 return;
3471 }
3472 devip = (struct sdebug_dev_info *)scp->device->hostdata;
3473 if (likely(devip))
3474 atomic_dec(&devip->num_in_q);
3475 else
3476 pr_err("devip=NULL\n");
3477 if (unlikely(atomic_read(&retired_max_queue) > 0))
3478 retiring = 1;
3479
3480 sqcp->a_cmnd = NULL;
3481 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
3482 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3483 pr_err("Unexpected completion\n");
3484 return;
3485 }
3486
3487 if (unlikely(retiring)) { /* user has reduced max_queue */
3488 int k, retval;
3489
3490 retval = atomic_read(&retired_max_queue);
3491 if (qc_idx >= retval) {
3492 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3493 pr_err("index %d too large\n", retval);
3494 return;
3495 }
3496 k = find_last_bit(sqp->in_use_bm, retval);
3497 if ((k < sdebug_max_queue) || (k == retval))
3498 atomic_set(&retired_max_queue, 0);
3499 else
3500 atomic_set(&retired_max_queue, k + 1);
3501 }
3502 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3503 scp->scsi_done(scp); /* callback to mid level */
3504 }
3505
3506 /* When high resolution timer goes off this function is called. */
3507 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3508 {
3509 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3510 hrt);
3511 sdebug_q_cmd_complete(sd_dp);
3512 return HRTIMER_NORESTART;
3513 }
3514
3515 /* When work queue schedules work, it calls this function. */
3516 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3517 {
3518 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3519 ew.work);
3520 sdebug_q_cmd_complete(sd_dp);
3521 }
3522
3523 static bool got_shared_uuid;
3524 static uuid_be shared_uuid;
3525
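/* uuid_ctl module parameter: 0 -> no UUID in the LU name designator,
 * 1 -> generate a random UUID per logical unit, 2 -> generate one UUID
 * and share it across all logical units (see sdebug_device_create()).
 */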
3526 static struct sdebug_dev_info *sdebug_device_create(
3527 struct sdebug_host_info *sdbg_host, gfp_t flags)
3528 {
3529 struct sdebug_dev_info *devip;
3530
3531 devip = kzalloc(sizeof(*devip), flags);
3532 if (devip) {
3533 if (sdebug_uuid_ctl == 1)
3534 uuid_be_gen(&devip->lu_name);
3535 else if (sdebug_uuid_ctl == 2) {
3536 if (got_shared_uuid)
3537 devip->lu_name = shared_uuid;
3538 else {
3539 uuid_be_gen(&shared_uuid);
3540 got_shared_uuid = true;
3541 devip->lu_name = shared_uuid;
3542 }
3543 }
3544 devip->sdbg_host = sdbg_host;
3545 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3546 }
3547 return devip;
3548 }
3549
3550 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3551 {
3552 struct sdebug_host_info *sdbg_host;
3553 struct sdebug_dev_info *open_devip = NULL;
3554 struct sdebug_dev_info *devip;
3555
3556 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3557 if (!sdbg_host) {
3558 pr_err("Host info NULL\n");
3559 return NULL;
3560 }
3561 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3562 if ((devip->used) && (devip->channel == sdev->channel) &&
3563 (devip->target == sdev->id) &&
3564 (devip->lun == sdev->lun))
3565 return devip;
3566 else {
3567 if ((!devip->used) && (!open_devip))
3568 open_devip = devip;
3569 }
3570 }
3571 if (!open_devip) { /* try and make a new one */
3572 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3573 if (!open_devip) {
3574 pr_err("out of memory at line %d\n", __LINE__);
3575 return NULL;
3576 }
3577 }
3578
3579 open_devip->channel = sdev->channel;
3580 open_devip->target = sdev->id;
3581 open_devip->lun = sdev->lun;
3582 open_devip->sdbg_host = sdbg_host;
3583 atomic_set(&open_devip->num_in_q, 0);
3584 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3585 open_devip->used = true;
3586 return open_devip;
3587 }
3588
3589 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3590 {
3591 if (sdebug_verbose)
3592 pr_info("slave_alloc <%u %u %u %llu>\n",
3593 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3594 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3595 return 0;
3596 }
3597
3598 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3599 {
3600 struct sdebug_dev_info *devip =
3601 (struct sdebug_dev_info *)sdp->hostdata;
3602
3603 if (sdebug_verbose)
3604 pr_info("slave_configure <%u %u %u %llu>\n",
3605 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3606 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3607 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3608 if (devip == NULL) {
3609 devip = find_build_dev_info(sdp);
3610 if (devip == NULL)
3611 return 1; /* no resources, will be marked offline */
3612 }
3613 sdp->hostdata = devip;
3614 blk_queue_max_segment_size(sdp->request_queue, -1U);
3615 if (sdebug_no_uld)
3616 sdp->no_uld_attach = 1;
3617 return 0;
3618 }
3619
3620 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3621 {
3622 struct sdebug_dev_info *devip =
3623 (struct sdebug_dev_info *)sdp->hostdata;
3624
3625 if (sdebug_verbose)
3626 pr_info("slave_destroy <%u %u %u %llu>\n",
3627 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3628 if (devip) {
3629 /* make this slot available for re-use */
3630 devip->used = false;
3631 sdp->hostdata = NULL;
3632 }
3633 }
3634
3635 static void stop_qc_helper(struct sdebug_defer *sd_dp)
3636 {
3637 if (!sd_dp)
3638 return;
3639 if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
3640 hrtimer_cancel(&sd_dp->hrt);
3641 else if (sdebug_jdelay < 0)
3642 cancel_work_sync(&sd_dp->ew.work);
3643 }
3644
3645 /* If @cmnd is found, delete its timer or work queue and return true;
3646    else return false. */
3647 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
3648 {
3649 unsigned long iflags;
3650 int j, k, qmax, r_qmax;
3651 struct sdebug_queue *sqp;
3652 struct sdebug_queued_cmd *sqcp;
3653 struct sdebug_dev_info *devip;
3654 struct sdebug_defer *sd_dp;
3655
3656 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3657 spin_lock_irqsave(&sqp->qc_lock, iflags);
3658 qmax = sdebug_max_queue;
3659 r_qmax = atomic_read(&retired_max_queue);
3660 if (r_qmax > qmax)
3661 qmax = r_qmax;
3662 for (k = 0; k < qmax; ++k) {
3663 if (test_bit(k, sqp->in_use_bm)) {
3664 sqcp = &sqp->qc_arr[k];
3665 if (cmnd != sqcp->a_cmnd)
3666 continue;
3667 /* found */
3668 devip = (struct sdebug_dev_info *)
3669 cmnd->device->hostdata;
3670 if (devip)
3671 atomic_dec(&devip->num_in_q);
3672 sqcp->a_cmnd = NULL;
3673 sd_dp = sqcp->sd_dp;
3674 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3675 stop_qc_helper(sd_dp);
3676 clear_bit(k, sqp->in_use_bm);
3677 return true;
3678 }
3679 }
3680 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3681 }
3682 return false;
3683 }
3684
3685 /* Deletes (stops) timers or work queues of all queued commands */
3686 static void stop_all_queued(void)
3687 {
3688 unsigned long iflags;
3689 int j, k;
3690 struct sdebug_queue *sqp;
3691 struct sdebug_queued_cmd *sqcp;
3692 struct sdebug_dev_info *devip;
3693 struct sdebug_defer *sd_dp;
3694
3695 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3696 spin_lock_irqsave(&sqp->qc_lock, iflags);
3697 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
3698 if (test_bit(k, sqp->in_use_bm)) {
3699 sqcp = &sqp->qc_arr[k];
3700 if (sqcp->a_cmnd == NULL)
3701 continue;
3702 devip = (struct sdebug_dev_info *)
3703 sqcp->a_cmnd->device->hostdata;
3704 if (devip)
3705 atomic_dec(&devip->num_in_q);
3706 sqcp->a_cmnd = NULL;
3707 sd_dp = sqcp->sd_dp;
3708 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3709 stop_qc_helper(sd_dp);
3710 clear_bit(k, sqp->in_use_bm);
3711 spin_lock_irqsave(&sqp->qc_lock, iflags);
3712 }
3713 }
3714 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3715 }
3716 }
3717
3718 /* Free queued command memory on heap */
3719 static void free_all_queued(void)
3720 {
3721 int j, k;
3722 struct sdebug_queue *sqp;
3723 struct sdebug_queued_cmd *sqcp;
3724
3725 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3726 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
3727 sqcp = &sqp->qc_arr[k];
3728 kfree(sqcp->sd_dp);
3729 sqcp->sd_dp = NULL;
3730 }
3731 }
3732 }
3733
3734 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3735 {
3736 bool ok;
3737
3738 ++num_aborts;
3739 if (SCpnt) {
3740 ok = stop_queued_cmnd(SCpnt);
3741 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3742 sdev_printk(KERN_INFO, SCpnt->device,
3743 "%s: command%s found\n", __func__,
3744 ok ? "" : " not");
3745 }
3746 return SUCCESS;
3747 }
3748
3749 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
3750 {
3751 ++num_dev_resets;
3752 if (SCpnt && SCpnt->device) {
3753 struct scsi_device *sdp = SCpnt->device;
3754 struct sdebug_dev_info *devip =
3755 (struct sdebug_dev_info *)sdp->hostdata;
3756
3757 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3758 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3759 if (devip)
3760 set_bit(SDEBUG_UA_POR, devip->uas_bm);
3761 }
3762 return SUCCESS;
3763 }
3764
3765 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3766 {
3767 struct sdebug_host_info *sdbg_host;
3768 struct sdebug_dev_info *devip;
3769 struct scsi_device *sdp;
3770 struct Scsi_Host *hp;
3771 int k = 0;
3772
3773 ++num_target_resets;
3774 if (!SCpnt)
3775 goto lie;
3776 sdp = SCpnt->device;
3777 if (!sdp)
3778 goto lie;
3779 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3780 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3781 hp = sdp->host;
3782 if (!hp)
3783 goto lie;
3784 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3785 if (sdbg_host) {
3786 list_for_each_entry(devip,
3787 &sdbg_host->dev_info_list,
3788 dev_list)
3789 if (devip->target == sdp->id) {
3790 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3791 ++k;
3792 }
3793 }
3794 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3795 sdev_printk(KERN_INFO, sdp,
3796 "%s: %d device(s) found in target\n", __func__, k);
3797 lie:
3798 return SUCCESS;
3799 }
3800
3801 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
3802 {
3803 	struct sdebug_host_info *sdbg_host;
3804 	struct sdebug_dev_info *devip;
3805 	struct scsi_device *sdp;
3806 	struct Scsi_Host *hp;
3807 int k = 0;
3808
3809 ++num_bus_resets;
3810 if (!(SCpnt && SCpnt->device))
3811 goto lie;
3812 sdp = SCpnt->device;
3813 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3814 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3815 hp = sdp->host;
3816 if (hp) {
3817 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3818 if (sdbg_host) {
3819 list_for_each_entry(devip,
3820 &sdbg_host->dev_info_list,
3821 dev_list) {
3822 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3823 ++k;
3824 }
3825 }
3826 }
3827 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3828 sdev_printk(KERN_INFO, sdp,
3829 "%s: %d device(s) found in host\n", __func__, k);
3830 lie:
3831 return SUCCESS;
3832 }
3833
3834 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
3835 {
3836 	struct sdebug_host_info *sdbg_host;
3837 struct sdebug_dev_info *devip;
3838 int k = 0;
3839
3840 ++num_host_resets;
3841 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3842 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3843 spin_lock(&sdebug_host_list_lock);
3844 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3845 list_for_each_entry(devip, &sdbg_host->dev_info_list,
3846 dev_list) {
3847 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3848 ++k;
3849 }
3850 }
3851 spin_unlock(&sdebug_host_list_lock);
3852 stop_all_queued();
3853 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3854 sdev_printk(KERN_INFO, SCpnt->device,
3855 "%s: %d device(s) found\n", __func__, k);
3856 return SUCCESS;
3857 }
3858
3859 static void __init sdebug_build_parts(unsigned char *ramp,
3860 unsigned long store_size)
3861 {
3862 	struct partition *pp;
3863 int starts[SDEBUG_MAX_PARTS + 2];
3864 int sectors_per_part, num_sectors, k;
3865 int heads_by_sects, start_sec, end_sec;
3866
3867 /* assume partition table already zeroed */
3868 if ((sdebug_num_parts < 1) || (store_size < 1048576))
3869 return;
3870 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
3871 sdebug_num_parts = SDEBUG_MAX_PARTS;
3872 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
3873 }
3874 num_sectors = (int)sdebug_store_sectors;
3875 sectors_per_part = (num_sectors - sdebug_sectors_per)
3876 / sdebug_num_parts;
3877 heads_by_sects = sdebug_heads * sdebug_sectors_per;
3878 starts[0] = sdebug_sectors_per;
3879 for (k = 1; k < sdebug_num_parts; ++k)
3880 starts[k] = ((k * sectors_per_part) / heads_by_sects)
3881 * heads_by_sects;
3882 starts[sdebug_num_parts] = num_sectors;
3883 starts[sdebug_num_parts + 1] = 0;
3884
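	/* Classic MBR layout: boot signature 0x55AA at offsets 510/511, and
	 * up to four 16-byte partition entries starting at offset 0x1be (446).
	 */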
3885 ramp[510] = 0x55; /* magic partition markings */
3886 ramp[511] = 0xAA;
3887 pp = (struct partition *)(ramp + 0x1be);
3888 for (k = 0; starts[k + 1]; ++k, ++pp) {
3889 start_sec = starts[k];
3890 end_sec = starts[k + 1] - 1;
3891 pp->boot_ind = 0;
3892
3893 pp->cyl = start_sec / heads_by_sects;
3894 pp->head = (start_sec - (pp->cyl * heads_by_sects))
3895 / sdebug_sectors_per;
3896 pp->sector = (start_sec % sdebug_sectors_per) + 1;
3897
3898 pp->end_cyl = end_sec / heads_by_sects;
3899 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3900 / sdebug_sectors_per;
3901 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
3902
3903 pp->start_sect = cpu_to_le32(start_sec);
3904 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3905 pp->sys_ind = 0x83; /* plain Linux partition */
3906 }
3907 }
3908
3909 static void block_unblock_all_queues(bool block)
3910 {
3911 int j;
3912 struct sdebug_queue *sqp;
3913
3914 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
3915 atomic_set(&sqp->blocked, (int)block);
3916 }
3917
3918 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
3919 * commands will be processed normally before triggers occur.
3920 */
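/* Worked example with made-up numbers: if every_nth=100 and
 * sdebug_cmnd_count=237, the count is rounded down to 200; the next 99
 * commands (counts 201..299) are processed normally and the trigger
 * logic (see setup_inject() and fake_timeout()) fires when the count
 * reaches 300.
 */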
3921 static void tweak_cmnd_count(void)
3922 {
3923 int count, modulo;
3924
3925 modulo = abs(sdebug_every_nth);
3926 if (modulo < 2)
3927 return;
3928 block_unblock_all_queues(true);
3929 count = atomic_read(&sdebug_cmnd_count);
3930 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
3931 block_unblock_all_queues(false);
3932 }
3933
3934 static void clear_queue_stats(void)
3935 {
3936 atomic_set(&sdebug_cmnd_count, 0);
3937 atomic_set(&sdebug_completions, 0);
3938 atomic_set(&sdebug_miss_cpus, 0);
3939 atomic_set(&sdebug_a_tsf, 0);
3940 }
3941
3942 static void setup_inject(struct sdebug_queue *sqp,
3943 struct sdebug_queued_cmd *sqcp)
3944 {
3945 if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0)
3946 return;
3947 sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
3948 sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
3949 sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
3950 sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
3951 sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
3952 }
3953
3954 /* Complete the processing of the thread that queued a SCSI command to this
3955  * driver. It either completes the command immediately via the scsi_done()
3956  * callback or schedules an hrtimer or work item, then returns 0. Returns
3957 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
3958 */
3959 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3960 int scsi_result, int delta_jiff)
3961 {
3962 unsigned long iflags;
3963 int k, num_in_q, qdepth, inject;
3964 struct sdebug_queue *sqp;
3965 struct sdebug_queued_cmd *sqcp;
3966 struct scsi_device *sdp;
3967 struct sdebug_defer *sd_dp;
3968
3969 if (unlikely(devip == NULL)) {
3970 if (scsi_result == 0)
3971 scsi_result = DID_NO_CONNECT << 16;
3972 goto respond_in_thread;
3973 }
3974 sdp = cmnd->device;
3975
3976 if (unlikely(sdebug_verbose && scsi_result))
3977 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3978 __func__, scsi_result);
3979 if (delta_jiff == 0)
3980 goto respond_in_thread;
3981
3982 /* schedule the response at a later time if resources permit */
3983 sqp = get_queue(cmnd);
3984 spin_lock_irqsave(&sqp->qc_lock, iflags);
3985 if (unlikely(atomic_read(&sqp->blocked))) {
3986 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3987 return SCSI_MLQUEUE_HOST_BUSY;
3988 }
3989 num_in_q = atomic_read(&devip->num_in_q);
3990 qdepth = cmnd->device->queue_depth;
3991 inject = 0;
3992 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
3993 if (scsi_result) {
3994 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3995 goto respond_in_thread;
3996 } else
3997 scsi_result = device_qfull_result;
3998 } else if (unlikely(sdebug_every_nth &&
3999 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4000 (scsi_result == 0))) {
4001 if ((num_in_q == (qdepth - 1)) &&
4002 (atomic_inc_return(&sdebug_a_tsf) >=
4003 abs(sdebug_every_nth))) {
4004 atomic_set(&sdebug_a_tsf, 0);
4005 inject = 1;
4006 scsi_result = device_qfull_result;
4007 }
4008 }
4009
4010 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4011 if (unlikely(k >= sdebug_max_queue)) {
4012 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4013 if (scsi_result)
4014 goto respond_in_thread;
4015 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4016 scsi_result = device_qfull_result;
4017 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4018 sdev_printk(KERN_INFO, sdp,
4019 "%s: max_queue=%d exceeded, %s\n",
4020 __func__, sdebug_max_queue,
4021 (scsi_result ? "status: TASK SET FULL" :
4022 "report: host busy"));
4023 if (scsi_result)
4024 goto respond_in_thread;
4025 else
4026 return SCSI_MLQUEUE_HOST_BUSY;
4027 }
4028 __set_bit(k, sqp->in_use_bm);
4029 atomic_inc(&devip->num_in_q);
4030 sqcp = &sqp->qc_arr[k];
4031 sqcp->a_cmnd = cmnd;
4032 cmnd->host_scribble = (unsigned char *)sqcp;
4033 cmnd->result = scsi_result;
4034 sd_dp = sqcp->sd_dp;
4035 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4036 if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4037 setup_inject(sqp, sqcp);
4038 if (delta_jiff > 0 || sdebug_ndelay > 0) {
4039 ktime_t kt;
4040
4041 if (delta_jiff > 0) {
4042 struct timespec ts;
4043
4044 jiffies_to_timespec(delta_jiff, &ts);
4045 kt = ktime_set(ts.tv_sec, ts.tv_nsec);
4046 } else
4047 kt = ktime_set(0, sdebug_ndelay);
4048 if (NULL == sd_dp) {
4049 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4050 if (NULL == sd_dp)
4051 return SCSI_MLQUEUE_HOST_BUSY;
4052 sqcp->sd_dp = sd_dp;
4053 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4054 HRTIMER_MODE_REL_PINNED);
4055 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4056 sd_dp->sqa_idx = sqp - sdebug_q_arr;
4057 sd_dp->qc_idx = k;
4058 }
4059 if (sdebug_statistics)
4060 sd_dp->issuing_cpu = raw_smp_processor_id();
4061 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4062 } else { /* jdelay < 0, use work queue */
4063 if (NULL == sd_dp) {
4064 sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
4065 if (NULL == sd_dp)
4066 return SCSI_MLQUEUE_HOST_BUSY;
4067 sqcp->sd_dp = sd_dp;
4068 sd_dp->sqa_idx = sqp - sdebug_q_arr;
4069 sd_dp->qc_idx = k;
4070 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4071 }
4072 if (sdebug_statistics)
4073 sd_dp->issuing_cpu = raw_smp_processor_id();
4074 schedule_work(&sd_dp->ew.work);
4075 }
4076 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4077 (scsi_result == device_qfull_result)))
4078 sdev_printk(KERN_INFO, sdp,
4079 "%s: num_in_q=%d +1, %s%s\n", __func__,
4080 num_in_q, (inject ? "<inject> " : ""),
4081 "status: TASK SET FULL");
4082 return 0;
4083
4084 respond_in_thread: /* call back to mid-layer using invocation thread */
4085 cmnd->result = scsi_result;
4086 cmnd->scsi_done(cmnd);
4087 return 0;
4088 }
4089
4090 /* Note: The following macros create attribute files in the
4091    /sys/module/scsi_debug/parameters directory. Unfortunately the
4092    driver is not notified when one of those files changes, so it cannot
4093    trigger auxiliary actions as it can when the corresponding attribute
4094    in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4095 */
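/* Example (hypothetical invocation): load the module with two hosts,
 * four targets per host and a 256 MiB shared ramdisk:
 *
 *   modprobe scsi_debug add_host=2 num_tgts=4 dev_size_mb=256
 */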
4096 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
4097 module_param_named(ato, sdebug_ato, int, S_IRUGO);
4098 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
4099 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
4100 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
4101 module_param_named(dif, sdebug_dif, int, S_IRUGO);
4102 module_param_named(dix, sdebug_dix, int, S_IRUGO);
4103 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
4104 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
4105 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
4106 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
4107 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
4108 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
4109 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
4110 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
4111 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
4112 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
4113 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
4114 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
4115 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
4116 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
4117 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
4118 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
4119 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
4120 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
4121 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
4122 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
4123 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
4124 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
4125 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
4126 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
4127 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
4128 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
4129 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
4130 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
4131 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
4132 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
4133 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
4134 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
4135 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
4136 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
4137 S_IRUGO | S_IWUSR);
4138 module_param_named(write_same_length, sdebug_write_same_length, int,
4139 S_IRUGO | S_IWUSR);
4140
4141 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4142 MODULE_DESCRIPTION("SCSI debug adapter driver");
4143 MODULE_LICENSE("GPL");
4144 MODULE_VERSION(SDEBUG_VERSION);
4145
4146 MODULE_PARM_DESC(add_host, "0..127 hosts allowed (def=1)");
4147 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4148 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4149 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4150 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs (def=8)");
4151 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4152 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4153 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
4154 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
4155 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4156 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4157 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
4158 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4159 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4160 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4161 MODULE_PARM_DESC(lbprz,
4162 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
4163 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4164 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
4165 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4166 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4167 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4168 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
4169 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
4170 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
4171 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
4172 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4173 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4174 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
4175 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4176 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7[SPC-5])");
4177 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4178 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
4179 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4180 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
4181 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4182 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4183 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4184 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4185 MODULE_PARM_DESC(uuid_ctl,
4186 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
4187 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4188 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4189 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4190
4191 #define SDEBUG_INFO_LEN 256
4192 static char sdebug_info[SDEBUG_INFO_LEN];
4193
4194 static const char *scsi_debug_info(struct Scsi_Host *shp)
4195 {
4196 int k;
4197
4198 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4199 my_name, SDEBUG_VERSION, sdebug_version_date);
4200 if (k >= (SDEBUG_INFO_LEN - 1))
4201 return sdebug_info;
4202 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4203 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4204 sdebug_dev_size_mb, sdebug_opts, submit_queues,
4205 "statistics", (int)sdebug_statistics);
4206 return sdebug_info;
4207 }
4208
4209 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
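/* For example, assuming host_id 0, 'echo 1 > /proc/scsi/scsi_debug/0'
 * sets opts=1 (SDEBUG_OPT_NOISE) and thereby enables verbose logging.
 */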
4210 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4211 int length)
4212 {
4213 char arr[16];
4214 int opts;
4215 	int min_len = length > 15 ? 15 : length;
4216 
4217 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4218 		return -EACCES;
4219 	memcpy(arr, buffer, min_len);
4220 	arr[min_len] = '\0';
4221 if (1 != sscanf(arr, "%d", &opts))
4222 return -EINVAL;
4223 sdebug_opts = opts;
4224 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4225 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4226 if (sdebug_every_nth != 0)
4227 tweak_cmnd_count();
4228 return length;
4229 }
4230
4231 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4232 * same for each scsi_debug host (if more than one). Some of the counters
4233  * in the output are not atomic, so they may be inaccurate on a busy system. */
4234 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4235 {
4236 int f, j, l;
4237 struct sdebug_queue *sqp;
4238
4239 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
4240 SDEBUG_VERSION, sdebug_version_date);
4241 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
4242 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
4243 sdebug_opts, sdebug_every_nth);
4244 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
4245 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
4246 sdebug_sector_size, "bytes");
4247 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
4248 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
4249 num_aborts);
4250 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
4251 num_dev_resets, num_target_resets, num_bus_resets,
4252 num_host_resets);
4253 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
4254 dix_reads, dix_writes, dif_errors);
4255 seq_printf(m, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n",
4256 TICK_NSEC / 1000, "statistics", sdebug_statistics,
4257 sdebug_mq_active);
4258 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
4259 atomic_read(&sdebug_cmnd_count),
4260 atomic_read(&sdebug_completions),
4261 "miss_cpus", atomic_read(&sdebug_miss_cpus),
4262 atomic_read(&sdebug_a_tsf));
4263
4264 seq_printf(m, "submit_queues=%d\n", submit_queues);
4265 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4266 seq_printf(m, " queue %d:\n", j);
4267 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
4268 if (f != sdebug_max_queue) {
4269 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
4270 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
4271 "first,last bits", f, l);
4272 }
4273 }
4274 return 0;
4275 }
4276
4277 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4278 {
4279 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4280 }
4281 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4282 * of delay is jiffies.
4283 */
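/* For example 'echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay' delays
 * each response by 2 jiffies; the write fails with -EBUSY while commands
 * are still queued.
 */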
4284 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4285 size_t count)
4286 {
4287 int jdelay, res;
4288
4289 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4290 res = count;
4291 if (sdebug_jdelay != jdelay) {
4292 int j, k;
4293 struct sdebug_queue *sqp;
4294
4295 block_unblock_all_queues(true);
4296 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4297 ++j, ++sqp) {
4298 k = find_first_bit(sqp->in_use_bm,
4299 sdebug_max_queue);
4300 if (k != sdebug_max_queue) {
4301 res = -EBUSY; /* queued commands */
4302 break;
4303 }
4304 }
4305 if (res > 0) {
4306 /* make sure sdebug_defer instances get
4307 * re-allocated for new delay variant */
4308 free_all_queued();
4309 sdebug_jdelay = jdelay;
4310 sdebug_ndelay = 0;
4311 }
4312 block_unblock_all_queues(false);
4313 }
4314 return res;
4315 }
4316 return -EINVAL;
4317 }
4318 static DRIVER_ATTR_RW(delay);
4319
4320 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4321 {
4322 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4323 }
4324 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4325 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4326 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4327 size_t count)
4328 {
4329 int ndelay, res;
4330
4331 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4332 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4333 res = count;
4334 if (sdebug_ndelay != ndelay) {
4335 int j, k;
4336 struct sdebug_queue *sqp;
4337
4338 block_unblock_all_queues(true);
4339 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4340 ++j, ++sqp) {
4341 k = find_first_bit(sqp->in_use_bm,
4342 sdebug_max_queue);
4343 if (k != sdebug_max_queue) {
4344 res = -EBUSY; /* queued commands */
4345 break;
4346 }
4347 }
4348 if (res > 0) {
4349 /* make sure sdebug_defer instances get
4350 * re-allocated for new delay variant */
4351 free_all_queued();
4352 sdebug_ndelay = ndelay;
4353 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
4354 : DEF_JDELAY;
4355 }
4356 block_unblock_all_queues(false);
4357 }
4358 return res;
4359 }
4360 return -EINVAL;
4361 }
4362 static DRIVER_ATTR_RW(ndelay);
4363
4364 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4365 {
4366 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4367 }
4368
4369 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4370 size_t count)
4371 {
4372 int opts;
4373 char work[20];
4374
4375 if (1 == sscanf(buf, "%10s", work)) {
4376 		if (0 == strncasecmp(work, "0x", 2)) {
4377 if (1 == sscanf(&work[2], "%x", &opts))
4378 goto opts_done;
4379 } else {
4380 if (1 == sscanf(work, "%d", &opts))
4381 goto opts_done;
4382 }
4383 }
4384 return -EINVAL;
4385 opts_done:
4386 sdebug_opts = opts;
4387 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4388 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4389 tweak_cmnd_count();
4390 return count;
4391 }
4392 static DRIVER_ATTR_RW(opts);
4393
4394 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4395 {
4396 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4397 }
4398 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4399 size_t count)
4400 {
4401 int n;
4402
4403 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4404 sdebug_ptype = n;
4405 return count;
4406 }
4407 return -EINVAL;
4408 }
4409 static DRIVER_ATTR_RW(ptype);
4410
4411 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4412 {
4413 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4414 }
4415 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4416 size_t count)
4417 {
4418 int n;
4419
4420 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4421 sdebug_dsense = n;
4422 return count;
4423 }
4424 return -EINVAL;
4425 }
4426 static DRIVER_ATTR_RW(dsense);
4427
4428 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4429 {
4430 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4431 }
4432 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4433 size_t count)
4434 {
4435 int n;
4436
4437 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4438 n = (n > 0);
4439 sdebug_fake_rw = (sdebug_fake_rw > 0);
4440 if (sdebug_fake_rw != n) {
4441 if ((0 == n) && (NULL == fake_storep)) {
4442 unsigned long sz =
4443 (unsigned long)sdebug_dev_size_mb *
4444 1048576;
4445
4446 fake_storep = vmalloc(sz);
4447 if (NULL == fake_storep) {
4448 pr_err("out of memory, 9\n");
4449 return -ENOMEM;
4450 }
4451 memset(fake_storep, 0, sz);
4452 }
4453 sdebug_fake_rw = n;
4454 }
4455 return count;
4456 }
4457 return -EINVAL;
4458 }
4459 static DRIVER_ATTR_RW(fake_rw);
4460
4461 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4462 {
4463 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4464 }
4465 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4466 size_t count)
4467 {
4468 int n;
4469
4470 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4471 sdebug_no_lun_0 = n;
4472 return count;
4473 }
4474 return -EINVAL;
4475 }
4476 static DRIVER_ATTR_RW(no_lun_0);
4477
4478 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4479 {
4480 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4481 }
4482 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4483 size_t count)
4484 {
4485 int n;
4486
4487 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4488 sdebug_num_tgts = n;
4489 sdebug_max_tgts_luns();
4490 return count;
4491 }
4492 return -EINVAL;
4493 }
4494 static DRIVER_ATTR_RW(num_tgts);
4495
4496 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4497 {
4498 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4499 }
4500 static DRIVER_ATTR_RO(dev_size_mb);
4501
4502 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4503 {
4504 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4505 }
4506 static DRIVER_ATTR_RO(num_parts);
4507
4508 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4509 {
4510 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4511 }
4512 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4513 size_t count)
4514 {
4515 int nth;
4516
4517 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4518 sdebug_every_nth = nth;
4519 if (nth && !sdebug_statistics) {
4520 pr_info("every_nth needs statistics=1, set it\n");
4521 sdebug_statistics = true;
4522 }
4523 tweak_cmnd_count();
4524 return count;
4525 }
4526 return -EINVAL;
4527 }
4528 static DRIVER_ATTR_RW(every_nth);
4529
4530 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4531 {
4532 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4533 }
4534 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4535 size_t count)
4536 {
4537 int n;
4538 bool changed;
4539
4540 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4541 if (n > 256) {
4542 pr_warn("max_luns can be no more than 256\n");
4543 return -EINVAL;
4544 }
4545 changed = (sdebug_max_luns != n);
4546 sdebug_max_luns = n;
4547 sdebug_max_tgts_luns();
4548 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
4549 struct sdebug_host_info *sdhp;
4550 struct sdebug_dev_info *dp;
4551
4552 spin_lock(&sdebug_host_list_lock);
4553 list_for_each_entry(sdhp, &sdebug_host_list,
4554 host_list) {
4555 list_for_each_entry(dp, &sdhp->dev_info_list,
4556 dev_list) {
4557 set_bit(SDEBUG_UA_LUNS_CHANGED,
4558 dp->uas_bm);
4559 }
4560 }
4561 spin_unlock(&sdebug_host_list_lock);
4562 }
4563 return count;
4564 }
4565 return -EINVAL;
4566 }
4567 static DRIVER_ATTR_RW(max_luns);
4568
4569 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4570 {
4571 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
4572 }
4573 /* N.B. max_queue can be changed while there are queued commands. In flight
4574 * commands beyond the new max_queue will be completed. */
4575 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4576 size_t count)
4577 {
4578 int j, n, k, a;
4579 struct sdebug_queue *sqp;
4580
4581 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4582 (n <= SDEBUG_CANQUEUE)) {
4583 block_unblock_all_queues(true);
4584 k = 0;
4585 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4586 ++j, ++sqp) {
4587 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
4588 if (a > k)
4589 k = a;
4590 }
4591 sdebug_max_queue = n;
4592 if (k == SDEBUG_CANQUEUE)
4593 atomic_set(&retired_max_queue, 0);
4594 else if (k >= n)
4595 atomic_set(&retired_max_queue, k + 1);
4596 else
4597 atomic_set(&retired_max_queue, 0);
4598 block_unblock_all_queues(false);
4599 return count;
4600 }
4601 return -EINVAL;
4602 }
4603 static DRIVER_ATTR_RW(max_queue);
4604
4605 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4606 {
4607 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
4608 }
4609 static DRIVER_ATTR_RO(no_uld);
4610
4611 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4612 {
4613 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
4614 }
4615 static DRIVER_ATTR_RO(scsi_level);
4616
4617 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4618 {
4619 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
4620 }
4621 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4622 size_t count)
4623 {
4624 int n;
4625 bool changed;
4626
4627 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4628 changed = (sdebug_virtual_gb != n);
4629 sdebug_virtual_gb = n;
4630 sdebug_capacity = get_sdebug_capacity();
4631 if (changed) {
4632 struct sdebug_host_info *sdhp;
4633 struct sdebug_dev_info *dp;
4634
4635 spin_lock(&sdebug_host_list_lock);
4636 list_for_each_entry(sdhp, &sdebug_host_list,
4637 host_list) {
4638 list_for_each_entry(dp, &sdhp->dev_info_list,
4639 dev_list) {
4640 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4641 dp->uas_bm);
4642 }
4643 }
4644 spin_unlock(&sdebug_host_list_lock);
4645 }
4646 return count;
4647 }
4648 return -EINVAL;
4649 }
4650 static DRIVER_ATTR_RW(virtual_gb);
4651
4652 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4653 {
4654 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4655 }
4656
4657 static int sdebug_add_adapter(void);
4658 static void sdebug_remove_adapter(void);
4659
4660 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4661 size_t count)
4662 {
4663 int delta_hosts;
4664
4665 if (sscanf(buf, "%d", &delta_hosts) != 1)
4666 return -EINVAL;
4667 if (delta_hosts > 0) {
4668 do {
4669 sdebug_add_adapter();
4670 } while (--delta_hosts);
4671 } else if (delta_hosts < 0) {
4672 do {
4673 sdebug_remove_adapter();
4674 } while (++delta_hosts);
4675 }
4676 return count;
4677 }
4678 static DRIVER_ATTR_RW(add_host);
4679
4680 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4681 {
4682 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
4683 }
4684 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4685 size_t count)
4686 {
4687 int n;
4688
4689 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4690 sdebug_vpd_use_hostno = n;
4691 return count;
4692 }
4693 return -EINVAL;
4694 }
4695 static DRIVER_ATTR_RW(vpd_use_hostno);
4696
4697 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
4698 {
4699 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
4700 }
4701 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
4702 size_t count)
4703 {
4704 int n;
4705
4706 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
4707 if (n > 0)
4708 sdebug_statistics = true;
4709 else {
4710 clear_queue_stats();
4711 sdebug_statistics = false;
4712 }
4713 return count;
4714 }
4715 return -EINVAL;
4716 }
4717 static DRIVER_ATTR_RW(statistics);
4718
4719 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4720 {
4721 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
4722 }
4723 static DRIVER_ATTR_RO(sector_size);
4724
4725 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
4726 {
4727 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
4728 }
4729 static DRIVER_ATTR_RO(submit_queues);
4730
4731 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4732 {
4733 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
4734 }
4735 static DRIVER_ATTR_RO(dix);
4736
4737 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4738 {
4739 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
4740 }
4741 static DRIVER_ATTR_RO(dif);
4742
4743 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4744 {
4745 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
4746 }
4747 static DRIVER_ATTR_RO(guard);
4748
4749 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4750 {
4751 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
4752 }
4753 static DRIVER_ATTR_RO(ato);
4754
4755 static ssize_t map_show(struct device_driver *ddp, char *buf)
4756 {
4757 ssize_t count;
4758
4759 if (!scsi_debug_lbp())
4760 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
4761 sdebug_store_sectors);
4762
4763 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
4764 (int)map_size, map_storep);
4765 buf[count++] = '\n';
4766 buf[count] = '\0';
4767
4768 return count;
4769 }
4770 static DRIVER_ATTR_RO(map);
4771
4772 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4773 {
4774 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
4775 }
4776 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4777 size_t count)
4778 {
4779 int n;
4780
4781 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4782 sdebug_removable = (n > 0);
4783 return count;
4784 }
4785 return -EINVAL;
4786 }
4787 static DRIVER_ATTR_RW(removable);
4788
4789 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4790 {
4791 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
4792 }
4793 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
4794 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4795 size_t count)
4796 {
4797 int n;
4798
4799 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4800 sdebug_host_lock = (n > 0);
4801 return count;
4802 }
4803 return -EINVAL;
4804 }
4805 static DRIVER_ATTR_RW(host_lock);
4806
4807 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4808 {
4809 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
4810 }
4811 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4812 size_t count)
4813 {
4814 int n;
4815
4816 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4817 sdebug_strict = (n > 0);
4818 return count;
4819 }
4820 return -EINVAL;
4821 }
4822 static DRIVER_ATTR_RW(strict);
4823
4824 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
4825 {
4826 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
4827 }
4828 static DRIVER_ATTR_RO(uuid_ctl);
4829
4830
4831 /* Note: The following array creates attribute files in the
4832 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4833 files (over those found in the /sys/module/scsi_debug/parameters
4834 directory) is that auxiliary actions can be triggered when an attribute
4835    is changed. For example see: add_host_store() above.
4836 */
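/* For example 'echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host'
 * adds one pseudo adapter via add_host_store(), and writing -1 removes
 * the most recently added one.
 */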
4837
4838 static struct attribute *sdebug_drv_attrs[] = {
4839 &driver_attr_delay.attr,
4840 &driver_attr_opts.attr,
4841 &driver_attr_ptype.attr,
4842 &driver_attr_dsense.attr,
4843 &driver_attr_fake_rw.attr,
4844 &driver_attr_no_lun_0.attr,
4845 &driver_attr_num_tgts.attr,
4846 &driver_attr_dev_size_mb.attr,
4847 &driver_attr_num_parts.attr,
4848 &driver_attr_every_nth.attr,
4849 &driver_attr_max_luns.attr,
4850 &driver_attr_max_queue.attr,
4851 &driver_attr_no_uld.attr,
4852 &driver_attr_scsi_level.attr,
4853 &driver_attr_virtual_gb.attr,
4854 &driver_attr_add_host.attr,
4855 &driver_attr_vpd_use_hostno.attr,
4856 &driver_attr_sector_size.attr,
4857 &driver_attr_statistics.attr,
4858 &driver_attr_submit_queues.attr,
4859 &driver_attr_dix.attr,
4860 &driver_attr_dif.attr,
4861 &driver_attr_guard.attr,
4862 &driver_attr_ato.attr,
4863 &driver_attr_map.attr,
4864 &driver_attr_removable.attr,
4865 &driver_attr_host_lock.attr,
4866 &driver_attr_ndelay.attr,
4867 &driver_attr_strict.attr,
4868 &driver_attr_uuid_ctl.attr,
4869 NULL,
4870 };
4871 ATTRIBUTE_GROUPS(sdebug_drv);
4872
4873 static struct device *pseudo_primary;
4874
4875 static int __init scsi_debug_init(void)
4876 {
4877 unsigned long sz;
4878 int host_to_add;
4879 int k;
4880 int ret;
4881
4882 atomic_set(&retired_max_queue, 0);
4883
4884 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
4885 pr_warn("ndelay must be less than 1 second, ignored\n");
4886 sdebug_ndelay = 0;
4887 } else if (sdebug_ndelay > 0)
4888 sdebug_jdelay = JDELAY_OVERRIDDEN;
4889
4890 switch (sdebug_sector_size) {
4891 case 512:
4892 case 1024:
4893 case 2048:
4894 case 4096:
4895 break;
4896 default:
4897 pr_err("invalid sector_size %d\n", sdebug_sector_size);
4898 return -EINVAL;
4899 }
4900
4901 switch (sdebug_dif) {
4902
4903 case SD_DIF_TYPE0_PROTECTION:
4904 break;
4905 case SD_DIF_TYPE1_PROTECTION:
4906 case SD_DIF_TYPE2_PROTECTION:
4907 case SD_DIF_TYPE3_PROTECTION:
4908 have_dif_prot = true;
4909 break;
4910
4911 default:
4912 pr_err("dif must be 0, 1, 2 or 3\n");
4913 return -EINVAL;
4914 }
4915
4916 if (sdebug_guard > 1) {
4917 pr_err("guard must be 0 or 1\n");
4918 return -EINVAL;
4919 }
4920
4921 if (sdebug_ato > 1) {
4922 pr_err("ato must be 0 or 1\n");
4923 return -EINVAL;
4924 }
4925
4926 if (sdebug_physblk_exp > 15) {
4927 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
4928 return -EINVAL;
4929 }
4930 if (sdebug_max_luns > 256) {
4931 pr_warn("max_luns can be no more than 256, use default\n");
4932 sdebug_max_luns = DEF_MAX_LUNS;
4933 }
4934
4935 if (sdebug_lowest_aligned > 0x3fff) {
4936 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
4937 return -EINVAL;
4938 }
4939
4940 if (submit_queues < 1) {
4941 pr_err("submit_queues must be 1 or more\n");
4942 return -EINVAL;
4943 }
4944 sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
4945 GFP_KERNEL);
4946 if (sdebug_q_arr == NULL)
4947 return -ENOMEM;
4948 for (k = 0; k < submit_queues; ++k)
4949 spin_lock_init(&sdebug_q_arr[k].qc_lock);
4950
4951 if (sdebug_dev_size_mb < 1)
4952 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
4953 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
4954 sdebug_store_sectors = sz / sdebug_sector_size;
4955 sdebug_capacity = get_sdebug_capacity();
4956
4957 /* play around with geometry, don't waste too much on track 0 */
4958 sdebug_heads = 8;
4959 sdebug_sectors_per = 32;
4960 if (sdebug_dev_size_mb >= 256)
4961 sdebug_heads = 64;
4962 else if (sdebug_dev_size_mb >= 16)
4963 sdebug_heads = 32;
4964 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4965 (sdebug_sectors_per * sdebug_heads);
4966 if (sdebug_cylinders_per >= 1024) {
4967 /* other LLDs do this; implies >= 1GB ram disk ... */
4968 sdebug_heads = 255;
4969 sdebug_sectors_per = 63;
4970 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4971 (sdebug_sectors_per * sdebug_heads);
4972 }
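	/* For example with the defaults dev_size_mb=8 and sector_size=512:
	 * sdebug_store_sectors = 16384, heads = 8, sectors_per = 32, so
	 * cylinders_per = 16384 / (32 * 8) = 64.
	 */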
4973
4974 if (sdebug_fake_rw == 0) {
4975 fake_storep = vmalloc(sz);
4976 if (NULL == fake_storep) {
4977 pr_err("out of memory, 1\n");
4978 ret = -ENOMEM;
4979 goto free_q_arr;
4980 }
4981 memset(fake_storep, 0, sz);
4982 if (sdebug_num_parts > 0)
4983 sdebug_build_parts(fake_storep, sz);
4984 }
4985
4986 if (sdebug_dix) {
4987 int dif_size;
4988
4989 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4990 dif_storep = vmalloc(dif_size);
4991
4992 pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
4993
4994 if (dif_storep == NULL) {
4995 pr_err("out of mem. (DIX)\n");
4996 ret = -ENOMEM;
4997 goto free_vm;
4998 }
4999
5000 memset(dif_storep, 0xff, dif_size);
5001 }
5002
5003 /* Logical Block Provisioning */
5004 if (scsi_debug_lbp()) {
5005 sdebug_unmap_max_blocks =
5006 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
5007
5008 sdebug_unmap_max_desc =
5009 clamp(sdebug_unmap_max_desc, 0U, 256U);
5010
5011 sdebug_unmap_granularity =
5012 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
5013
5014 if (sdebug_unmap_alignment &&
5015 sdebug_unmap_granularity <=
5016 sdebug_unmap_alignment) {
5017 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
5018 ret = -EINVAL;
5019 goto free_vm;
5020 }
5021
5022 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
5023 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
5024
5025 pr_info("%lu provisioning blocks\n", map_size);
5026
5027 if (map_storep == NULL) {
5028 pr_err("out of mem. (MAP)\n");
5029 ret = -ENOMEM;
5030 goto free_vm;
5031 }
5032
5033 bitmap_zero(map_storep, map_size);
5034
5035 /* Map first 1KB for partition table */
5036 if (sdebug_num_parts)
5037 map_region(0, 2);
5038 }
5039
5040 pseudo_primary = root_device_register("pseudo_0");
5041 if (IS_ERR(pseudo_primary)) {
5042 pr_warn("root_device_register() error\n");
5043 ret = PTR_ERR(pseudo_primary);
5044 goto free_vm;
5045 }
5046 ret = bus_register(&pseudo_lld_bus);
5047 if (ret < 0) {
5048 pr_warn("bus_register error: %d\n", ret);
5049 goto dev_unreg;
5050 }
5051 ret = driver_register(&sdebug_driverfs_driver);
5052 if (ret < 0) {
5053 pr_warn("driver_register error: %d\n", ret);
5054 goto bus_unreg;
5055 }
5056
5057 host_to_add = sdebug_add_host;
5058 sdebug_add_host = 0;
5059
5060 for (k = 0; k < host_to_add; k++) {
5061 if (sdebug_add_adapter()) {
5062 pr_err("sdebug_add_adapter failed k=%d\n", k);
5063 break;
5064 }
5065 }
5066
5067 if (sdebug_verbose)
5068 pr_info("built %d host(s)\n", sdebug_add_host);
5069
5070 return 0;
5071
5072 bus_unreg:
5073 bus_unregister(&pseudo_lld_bus);
5074 dev_unreg:
5075 root_device_unregister(pseudo_primary);
5076 free_vm:
5077 vfree(map_storep);
5078 vfree(dif_storep);
5079 vfree(fake_storep);
5080 free_q_arr:
5081 kfree(sdebug_q_arr);
5082 return ret;
5083 }
5084
5085 static void __exit scsi_debug_exit(void)
5086 {
5087 int k = sdebug_add_host;
5088
5089 stop_all_queued();
5090 free_all_queued();
5091 for (; k; k--)
5092 sdebug_remove_adapter();
5093 driver_unregister(&sdebug_driverfs_driver);
5094 bus_unregister(&pseudo_lld_bus);
5095 root_device_unregister(pseudo_primary);
5096
5097 vfree(dif_storep);
5098 vfree(fake_storep);
5099 kfree(sdebug_q_arr);
5100 }
5101
5102 device_initcall(scsi_debug_init);
5103 module_exit(scsi_debug_exit);
5104
5105 static void sdebug_release_adapter(struct device *dev)
5106 {
5107 struct sdebug_host_info *sdbg_host;
5108
5109 sdbg_host = to_sdebug_host(dev);
5110 kfree(sdbg_host);
5111 }
5112
5113 static int sdebug_add_adapter(void)
5114 {
5115 int k, devs_per_host;
5116 int error = 0;
5117 struct sdebug_host_info *sdbg_host;
5118 struct sdebug_dev_info *sdbg_devinfo, *tmp;
5119
5120         sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5121 if (NULL == sdbg_host) {
5122 pr_err("out of memory at line %d\n", __LINE__);
5123 return -ENOMEM;
5124 }
5125
5126 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5127
5128 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5129 for (k = 0; k < devs_per_host; k++) {
5130 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5131 if (!sdbg_devinfo) {
5132 pr_err("out of memory at line %d\n", __LINE__);
5133 error = -ENOMEM;
5134 goto clean;
5135 }
5136 }
5137
5138 spin_lock(&sdebug_host_list_lock);
5139 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5140 spin_unlock(&sdebug_host_list_lock);
5141
5142 sdbg_host->dev.bus = &pseudo_lld_bus;
5143 sdbg_host->dev.parent = pseudo_primary;
5144 sdbg_host->dev.release = &sdebug_release_adapter;
5145 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5146
5147 error = device_register(&sdbg_host->dev);
5148
5149 if (error)
5150 goto clean;
5151
5152 ++sdebug_add_host;
5153 return error;
5154
5155 clean:
5156 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5157 dev_list) {
5158 list_del(&sdbg_devinfo->dev_list);
5159 kfree(sdbg_devinfo);
5160 }
5161
5162 kfree(sdbg_host);
5163 return error;
5164 }
5165
5166 static void sdebug_remove_adapter(void)
5167 {
5168 	struct sdebug_host_info *sdbg_host = NULL;
5169
5170 spin_lock(&sdebug_host_list_lock);
5171 if (!list_empty(&sdebug_host_list)) {
5172 sdbg_host = list_entry(sdebug_host_list.prev,
5173 struct sdebug_host_info, host_list);
5174 list_del(&sdbg_host->host_list);
5175 }
5176 spin_unlock(&sdebug_host_list_lock);
5177
5178 if (!sdbg_host)
5179 return;
5180
5181 device_unregister(&sdbg_host->dev);
5182 --sdebug_add_host;
5183 }
5184
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (!devip) {
		block_unblock_all_queues(false);
		return -ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth < 1)
		qdepth = 1;
	/* allow to exceed max host qc_arr elements for testing */
	if (qdepth > SDEBUG_CANQUEUE + 10)
		qdepth = SDEBUG_CANQUEUE + 10;
	scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}

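/*
 * Returns true when the every_nth counter says this command should
 * be ignored, so that the mid layer eventually sees it time out.
 */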
static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true; /* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true; /* time out reads and writes */
	}
	return false;
}

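/*
 * queuecommand entry point. After optional CDB tracing, the opcode
 * (plus service action, when one applies) is looked up in
 * opcode_info_arr. The command is then screened: strict CDB mask
 * checking, pending unit attentions and the stopped (not ready)
 * state may each raise a check condition before the matching resp_*
 * handler runs. The result is handed to schedule_resp(), which
 * applies the configured command delay.
 */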
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		if (sdebug_mq_active)
			sdev_printk(KERN_INFO, sdp, "%s: tag=%u, cmd %s\n",
				    my_name, blk_mq_unique_tag(scp->request),
				    b);
		else
			sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
				    b);
	}
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (!devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Opcode 0x%x not supported for wlun\n",
				    my_name, opcode);
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s reports: Not ready: initializing command required\n",
				    my_name);
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		errsts = oip->pfp(scp, devip);	/* calls a resp_* function */
	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
		errsts = r_pfp(scp, devip);

fini:
	return schedule_resp(scp, devip, errsts,
			     ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
check_cond:
	return schedule_resp(scp, devip, check_condition_result, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
}

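/*
 * Template for the Scsi_Host(s) this driver creates; can_queue and
 * use_clustering may be overridden in sdebug_driver_probe().
 */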
static struct scsi_host_template sdebug_driver_template = {
	.show_info = scsi_debug_show_info,
	.write_info = scsi_debug_write_info,
	.proc_name = sdebug_proc_name,
	.name = "SCSI DEBUG",
	.info = scsi_debug_info,
	.slave_alloc = scsi_debug_slave_alloc,
	.slave_configure = scsi_debug_slave_configure,
	.slave_destroy = scsi_debug_slave_destroy,
	.ioctl = scsi_debug_ioctl,
	.queuecommand = scsi_debug_queuecommand,
	.change_queue_depth = sdebug_change_qdepth,
	.eh_abort_handler = scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue = SDEBUG_CANQUEUE,
	.this_id = 7,
	.sg_tablesize = SG_MAX_SEGMENTS,
	.cmd_per_lun = DEF_CMD_PER_LUN,
	.max_sectors = -1U,
	.use_clustering = DISABLE_CLUSTERING,
	.module = THIS_MODULE,
	.track_queue_depth = 1,
};

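/*
 * Probe callback for the pseudo bus: allocates a Scsi_Host for the
 * adapter, configures multiqueue and DIF/DIX protection from the
 * module parameters, then adds and scans the host.
 */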
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (sdebug_clustering)
		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
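	/* hostdata need only be big enough to hold a pointer back to
	 * sdbg_host; it is filled in below once allocation succeeds. */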
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (!hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%d\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell the scsi subsystem that we want mq;
	 * the following should give the same answer for each host.
	 */
	sdebug_mq_active = shost_use_blk_mq(hpnt) && (submit_queues > 1);
	if (sdebug_mq_active)
		hpnt->nr_hw_queues = submit_queues;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {
	case SD_DIF_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case SD_DIF_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case SD_DIF_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}

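/*
 * Remove callback for the pseudo bus: detaches the Scsi_Host and
 * frees the per-device info created in sdebug_add_adapter().
 */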
static int sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	if (!sdbg_host) {
		pr_err("Unable to locate host info\n");
		return -ENODEV;
	}

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
	return 0;
}

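/* Every device on the pseudo bus is handled by this driver. */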
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};