Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4 * Copyright (C) 1992 Eric Youngdale
5 * Simulate a host adapter with 2 disks attached. Do a lot of checking
6 * to make sure that we are not getting blocks mixed up, and PANIC if
7 * anything out of the ordinary is seen.
8 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 *
10 * Copyright (C) 2001 - 2021 Douglas Gilbert
11 *
12 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
13 */
14
15
16#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17
18#include <linux/module.h>
19#include <linux/align.h>
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/jiffies.h>
23#include <linux/slab.h>
24#include <linux/types.h>
25#include <linux/string.h>
26#include <linux/fs.h>
27#include <linux/init.h>
28#include <linux/proc_fs.h>
29#include <linux/vmalloc.h>
30#include <linux/moduleparam.h>
31#include <linux/scatterlist.h>
32#include <linux/blkdev.h>
33#include <linux/crc-t10dif.h>
34#include <linux/spinlock.h>
35#include <linux/interrupt.h>
36#include <linux/atomic.h>
37#include <linux/hrtimer.h>
38#include <linux/uuid.h>
39#include <linux/t10-pi.h>
40#include <linux/msdos_partition.h>
41#include <linux/random.h>
42#include <linux/xarray.h>
43#include <linux/prefetch.h>
44#include <linux/debugfs.h>
45#include <linux/async.h>
46#include <linux/cleanup.h>
47
48#include <net/checksum.h>
49
50#include <linux/unaligned.h>
51
52#include <scsi/scsi.h>
53#include <scsi/scsi_cmnd.h>
54#include <scsi/scsi_device.h>
55#include <scsi/scsi_host.h>
56#include <scsi/scsicam.h>
57#include <scsi/scsi_eh.h>
58#include <scsi/scsi_tcq.h>
59#include <scsi/scsi_dbg.h>
60
61#include "sd.h"
62#include "scsi_logging.h"
63
64/* make sure inq_product_rev string corresponds to this version */
65#define SDEBUG_VERSION "0191" /* format to fit INQUIRY revision field */
66static const char *sdebug_version_date = "20210520";
67
68#define MY_NAME "scsi_debug"
69
70/* Additional Sense Code (ASC) */
71#define NO_ADDITIONAL_SENSE 0x0
72#define OVERLAP_ATOMIC_COMMAND_ASC 0x0
73#define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
74#define FILEMARK_DETECTED_ASCQ 0x1
75#define EOP_EOM_DETECTED_ASCQ 0x2
76#define BEGINNING_OF_P_M_DETECTED_ASCQ 0x4
77#define EOD_DETECTED_ASCQ 0x5
78#define LOGICAL_UNIT_NOT_READY 0x4
79#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
80#define UNRECOVERED_READ_ERR 0x11
81#define PARAMETER_LIST_LENGTH_ERR 0x1a
82#define INVALID_OPCODE 0x20
83#define LBA_OUT_OF_RANGE 0x21
84#define INVALID_FIELD_IN_CDB 0x24
85#define INVALID_FIELD_IN_PARAM_LIST 0x26
86#define WRITE_PROTECTED 0x27
87#define UA_READY_ASC 0x28
88#define UA_RESET_ASC 0x29
89#define UA_CHANGED_ASC 0x2a
90#define TOO_MANY_IN_PARTITION_ASC 0x3b
91#define TARGET_CHANGED_ASC 0x3f
92#define LUNS_CHANGED_ASCQ 0x0e
93#define INSUFF_RES_ASC 0x55
94#define INSUFF_RES_ASCQ 0x3
95#define POWER_ON_RESET_ASCQ 0x0
96#define POWER_ON_OCCURRED_ASCQ 0x1
97#define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
98#define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
99#define CAPACITY_CHANGED_ASCQ 0x9
100#define SAVING_PARAMS_UNSUP 0x39
101#define TRANSPORT_PROBLEM 0x4b
102#define THRESHOLD_EXCEEDED 0x5d
103#define LOW_POWER_COND_ON 0x5e
104#define MISCOMPARE_VERIFY_ASC 0x1d
105#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
106#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
107#define WRITE_ERROR_ASC 0xc
108#define UNALIGNED_WRITE_ASCQ 0x4
109#define WRITE_BOUNDARY_ASCQ 0x5
110#define READ_INVDATA_ASCQ 0x6
111#define READ_BOUNDARY_ASCQ 0x7
112#define ATTEMPT_ACCESS_GAP 0x9
113#define INSUFF_ZONE_ASCQ 0xe
114/* see drivers/scsi/sense_codes.h */
115
116/* Additional Sense Code Qualifier (ASCQ) */
117#define ACK_NAK_TO 0x3
118
119/* Default values for driver parameters */
120#define DEF_NUM_HOST 1
121#define DEF_NUM_TGTS 1
122#define DEF_MAX_LUNS 1
123/* With these defaults, this driver will make 1 host with 1 target
124 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
125 */
126#define DEF_ATO 1
127#define DEF_CDB_LEN 10
128#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
129#define DEF_DEV_SIZE_PRE_INIT 0
130#define DEF_DEV_SIZE_MB 8
131#define DEF_ZBC_DEV_SIZE_MB 128
132#define DEF_DIF 0
133#define DEF_DIX 0
134#define DEF_PER_HOST_STORE false
135#define DEF_D_SENSE 0
136#define DEF_EVERY_NTH 0
137#define DEF_FAKE_RW 0
138#define DEF_GUARD 0
139#define DEF_HOST_LOCK 0
140#define DEF_LBPU 0
141#define DEF_LBPWS 0
142#define DEF_LBPWS10 0
143#define DEF_LBPRZ 1
144#define DEF_LOWEST_ALIGNED 0
145#define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
146#define DEF_NO_LUN_0 0
147#define DEF_NUM_PARTS 0
148#define DEF_OPTS 0
149#define DEF_OPT_BLKS 1024
150#define DEF_PHYSBLK_EXP 0
151#define DEF_OPT_XFERLEN_EXP 0
152#define DEF_PTYPE TYPE_DISK
153#define DEF_RANDOM false
154#define DEF_REMOVABLE false
155#define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
156#define DEF_SECTOR_SIZE 512
157#define DEF_UNMAP_ALIGNMENT 0
158#define DEF_UNMAP_GRANULARITY 1
159#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
160#define DEF_UNMAP_MAX_DESC 256
161#define DEF_VIRTUAL_GB 0
162#define DEF_VPD_USE_HOSTNO 1
163#define DEF_WRITESAME_LENGTH 0xFFFF
164#define DEF_ATOMIC_WR 0
165#define DEF_ATOMIC_WR_MAX_LENGTH 128
166#define DEF_ATOMIC_WR_ALIGN 2
167#define DEF_ATOMIC_WR_GRAN 2
168#define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH)
169#define DEF_ATOMIC_WR_MAX_BNDRY 128
170#define DEF_STRICT 0
171#define DEF_STATISTICS false
172#define DEF_SUBMIT_QUEUES 1
173#define DEF_TUR_MS_TO_READY 0
174#define DEF_UUID_CTL 0
175#define JDELAY_OVERRIDDEN -9999
176
177/* Default parameters for ZBC drives */
178#define DEF_ZBC_ZONE_SIZE_MB 128
179#define DEF_ZBC_MAX_OPEN_ZONES 8
180#define DEF_ZBC_NR_CONV_ZONES 1
181
182/* Default parameters for tape drives */
183#define TAPE_DEF_DENSITY 0x0
184#define TAPE_BAD_DENSITY 0x65
185#define TAPE_DEF_BLKSIZE 0
186#define TAPE_MIN_BLKSIZE 512
187#define TAPE_MAX_BLKSIZE 1048576
188#define TAPE_EW 20
189#define TAPE_MAX_PARTITIONS 2
190#define TAPE_UNITS 10000
191#define TAPE_PARTITION_1_UNITS 1000
192
193/* The tape block data definitions */
/*
 * Each simulated tape block header is a u32 "fl_size" word: the top two
 * bits carry the filemark/EOD marks and the low 30 bits carry the block
 * size (see struct tape_block below).
 *
 * Macro arguments are fully parenthesized so that expression arguments
 * (e.g. "sz | flag") bind correctly; '&' has lower precedence than most
 * operators a caller might use.
 */
#define TAPE_BLOCK_FM_FLAG ((u32)0x1 << 30)
#define TAPE_BLOCK_EOD_FLAG ((u32)0x2 << 30)
#define TAPE_BLOCK_MARK_MASK ((u32)0x3 << 30)
#define TAPE_BLOCK_SIZE_MASK (~TAPE_BLOCK_MARK_MASK)
#define TAPE_BLOCK_MARK(a) ((a) & TAPE_BLOCK_MARK_MASK)
#define TAPE_BLOCK_SIZE(a) ((a) & TAPE_BLOCK_SIZE_MASK)
#define IS_TAPE_BLOCK_FM(a) (((a) & TAPE_BLOCK_FM_FLAG) != 0)
#define IS_TAPE_BLOCK_EOD(a) (((a) & TAPE_BLOCK_EOD_FLAG) != 0)
202
/*
 * One simulated tape block: fl_size packs the mark flags (top 2 bits) and
 * the block size (low 30 bits) — see the TAPE_BLOCK_* macros above.
 * data[] is the start of the block payload; NOTE(review): the fixed [4]
 * size suggests blocks are allocated in fixed-size slots — confirm with
 * the tape read/write handlers.
 */
struct tape_block {
	u32 fl_size;
	unsigned char data[4];
};
207
208/* Flags for sense data */
209#define SENSE_FLAG_FILEMARK 0x80
210#define SENSE_FLAG_EOM 0x40
211#define SENSE_FLAG_ILI 0x20
212
213#define SDEBUG_LUN_0_VAL 0
214
215/* bit mask values for sdebug_opts */
216#define SDEBUG_OPT_NOISE 1
217#define SDEBUG_OPT_MEDIUM_ERR 2
218#define SDEBUG_OPT_TIMEOUT 4
219#define SDEBUG_OPT_RECOVERED_ERR 8
220#define SDEBUG_OPT_TRANSPORT_ERR 16
221#define SDEBUG_OPT_DIF_ERR 32
222#define SDEBUG_OPT_DIX_ERR 64
223#define SDEBUG_OPT_MAC_TIMEOUT 128
224#define SDEBUG_OPT_SHORT_TRANSFER 0x100
225#define SDEBUG_OPT_Q_NOISE 0x200
226#define SDEBUG_OPT_ALL_TSF 0x400 /* ignore */
227#define SDEBUG_OPT_RARE_TSF 0x800
228#define SDEBUG_OPT_N_WCE 0x1000
229#define SDEBUG_OPT_RESET_NOISE 0x2000
230#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
231#define SDEBUG_OPT_HOST_BUSY 0x8000
232#define SDEBUG_OPT_CMD_ABORT 0x10000
233#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
234 SDEBUG_OPT_RESET_NOISE)
235#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
236 SDEBUG_OPT_TRANSPORT_ERR | \
237 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
238 SDEBUG_OPT_SHORT_TRANSFER | \
239 SDEBUG_OPT_HOST_BUSY | \
240 SDEBUG_OPT_CMD_ABORT)
241#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
242 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
243
244/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
245 * priority order. In the subset implemented here lower numbers have higher
246 * priority. The UA numbers should be a sequence starting from 0 with
247 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
248#define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
249#define SDEBUG_UA_POOCCUR 1 /* Power on occurred */
250#define SDEBUG_UA_BUS_RESET 2
251#define SDEBUG_UA_MODE_CHANGED 3
252#define SDEBUG_UA_CAPACITY_CHANGED 4
253#define SDEBUG_UA_LUNS_CHANGED 5
254#define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */
255#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
256#define SDEBUG_UA_NOT_READY_TO_READY 8
257#define SDEBUG_NUM_UAS 9
258
259/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
260 * sector on read commands: */
261#define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
262#define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
263
264/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
265 * (for response) per submit queue at one time. Can be reduced by max_queue
266 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
267 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
268 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
269 * but cannot exceed SDEBUG_CANQUEUE .
270 */
271#define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
272#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
273#define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
274
275/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
276#define F_D_IN 1 /* Data-in command (e.g. READ) */
277#define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
278#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
279#define F_D_UNKN 8
280#define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
281#define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */
282#define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */
283#define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */
284#define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */
285#define F_INV_OP 0x200 /* invalid opcode (not supported) */
286#define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */
287#define F_M_ACCESS 0x800 /* media access, reacts to SSU state */
288#define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
289#define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */
290
291/* Useful combinations of the above flags */
292#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
293#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
294#define FF_SA (F_SA_HIGH | F_SA_LOW)
295#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
296
297/* Device selection bit mask */
298#define DS_ALL 0xffffffff
299#define DS_SBC (1 << TYPE_DISK)
300#define DS_SSC (1 << TYPE_TAPE)
301#define DS_ZBC (1 << TYPE_ZBC)
302
303#define DS_NO_SSC (DS_ALL & ~DS_SSC)
304
305#define SDEBUG_MAX_PARTS 4
306
307#define SDEBUG_MAX_CMD_LEN 32
308
309#define SDEB_XA_NOT_IN_USE XA_MARK_1
310
311/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,	/* conventional (randomly writable) */
	ZBC_ZTYPE_SWR	= 0x2,	/* sequential write required */
	ZBC_ZTYPE_SWP	= 0x3,	/* sequential write preferred */
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,	/* gap zone */
};
319
320/* enumeration names taken from table 26, zbcr05 */
/* Values are the ZBC zone-condition codes reported by REPORT ZONES */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};
331
struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;	/* "non sequential write resources active" attribute — TODO confirm */
	unsigned int z_size;		/* zone size — presumably in logical blocks; confirm against zone setup */
	sector_t z_start;		/* first LBA of the zone */
	sector_t z_wp;			/* current write pointer position */
};
340
/* Kinds of error injection selectable per device (see sdebug_err_inject) */
enum sdebug_err_type {
	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
					/* queuecmd return failed */
	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
					/* queuecmd return succeed but */
					/* with errors set in scsi_cmnd */
	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
					/* scsi_debug_abort() */
	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
					/* scsi_debug_device_reset() */
};
353
/*
 * One error-injection rule for a device; entries are presumably linked on
 * sdebug_dev_info.inject_err_list and freed via RCU (see rcu member).
 */
struct sdebug_err_inject {
	int type;		/* enum sdebug_err_type value */
	struct list_head list;	/* link in the device's inject_err_list — presumably; confirm */
	int cnt;		/* trigger count — NOTE(review): countdown semantics not visible here */
	unsigned char cmd;	/* SCSI opcode (cdb[0]) this rule applies to */
	struct rcu_head rcu;	/* for deferred free of this entry */

	union {
		/*
		 * For ERR_FAIL_QUEUE_CMD
		 */
		int queuecmd_ret;	/* value queuecommand should return */

		/*
		 * For ERR_FAIL_CMD
		 */
		struct {
			unsigned char host_byte;
			unsigned char driver_byte;
			unsigned char status_byte;
			unsigned char sense_key;
			unsigned char asc;
			unsigned char asq;	/* additional sense code qualifier (ASCQ) */
		};
	};
};
380
/*
 * Per logical unit (device) state: one instance per simulated LUN,
 * linked on its owning host's dev_info_list.
 */
struct sdebug_dev_info {
	struct list_head dev_list;	/* link in sdbg_host->dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;			/* logical unit name (UUID designator) — TODO confirm use */
	struct sdebug_host_info *sdbg_host;	/* owning simulated host */
	unsigned long uas_bm[1];	/* pending Unit Attentions; bit numbers are SDEBUG_UA_* */
	atomic_t stopped;		/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	bool zoned;
	unsigned int zcap;		/* zone capacity — presumably in logical blocks; confirm */
	unsigned int zsize;		/* zone size — presumably in logical blocks; confirm */
	unsigned int zsize_shift;	/* presumably log2(zsize) for LBA->zone conversion; confirm */
	unsigned int nr_zones;
	unsigned int nr_conv_zones;	/* conventional zones (ZBC_ZTYPE_CNV) */
	unsigned int nr_seq_zones;	/* sequential-write zones */
	unsigned int nr_imp_open;	/* zones in ZC2_IMPLICIT_OPEN */
	unsigned int nr_exp_open;	/* zones in ZC3_EXPLICIT_OPEN */
	unsigned int nr_closed;		/* zones in ZC4_CLOSED */
	unsigned int max_open;		/* limit on concurrently open zones */
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;	/* array of nr_zones per-zone states */

	/* For tapes */
	unsigned int tape_blksize;	/* fixed block size; TAPE_DEF_BLKSIZE is 0 — presumably variable mode */
	unsigned int tape_density;
	unsigned char tape_partition;	/* currently active partition */
	unsigned char tape_nbr_partitions;
	unsigned char tape_pending_nbr_partitions;	/* staged partition change — TODO confirm when applied */
	unsigned int tape_pending_part_0_size;
	unsigned int tape_pending_part_1_size;
	unsigned char tape_dce;
	unsigned int tape_location[TAPE_MAX_PARTITIONS];	/* current position per partition */
	unsigned int tape_eop[TAPE_MAX_PARTITIONS];		/* end-of-partition bound per partition */
	struct tape_block *tape_blocks[TAPE_MAX_PARTITIONS];	/* per-partition block storage */

	struct dentry *debugfs_entry;
	struct spinlock list_lock;	/* presumably guards inject_err_list; confirm with list users */
	struct list_head inject_err_list;	/* struct sdebug_err_inject entries */
};
424
/* Per-target state, exposed via debugfs (see debugfs_entry) */
struct sdebug_target_info {
	bool reset_fail;	/* when set, make target reset fail — presumably; see ERR_*_FAILED handling */
	struct dentry *debugfs_entry;
};
429
/* One simulated SCSI host; all instances are linked on a global host_list */
struct sdebug_host_info {
	struct list_head host_list;	/* link in the global list of sdebug hosts */
	int si_idx;		/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;	/* mid-layer host this entry shadows */
	struct device dev;		/* embedded device; see dev_to_sdebug_host() */
	struct list_head dev_info_list;	/* struct sdebug_dev_info children */
};
437
438/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_data_lck;	/* for media data access on this store */
	rwlock_t macc_meta_lck;	/* for atomic media meta access on this store */
	rwlock_t macc_sector_lck;	/* per-sector media data access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep;	/* protection info (DIF), one tuple per sector — TODO confirm layout */
	void *map_storep;	/* provisioning map — presumably a bitmap of mapped blocks; confirm */
};
447
/* Map the embedded struct device back to its sdebug_host_info container */
#define dev_to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

/*
 * NOTE(review): assumes shost->dma_dev points at sdebug_host_info.dev —
 * confirm at host registration. Argument parenthesized for macro hygiene.
 */
#define shost_to_sdebug_host(shost) \
	dev_to_sdebug_host((shost)->dma_dev)
453
/* How (or whether) a command's completion is deferred: none (inline),
 * hrtimer, workqueue, or left for mq_poll to reap. */
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
456
/* Deferred-completion bookkeeping for one in-flight command */
struct sdebug_defer {
	struct hrtimer hrt;	/* timer used for SDEB_DEFER_HRT */
	struct execute_work ew;	/* work item used for SDEB_DEFER_WQ */
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int issuing_cpu;	/* cpu the command was submitted on (see sdebug_miss_cpus) */
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;	/* which deferral mechanism is in use */
};
465
/* Per-command private data — presumably the scsi_cmnd PDU area; confirm */
struct sdebug_scsi_cmd {
	spinlock_t lock;	/* NOTE(review): appears to guard sd_dp state; confirm with users */
	struct sdebug_defer sd_dp;	/* deferred-completion state for this command */
};
470
/* Driver-wide counters: statistics plus error-injection arming state */
static atomic_t sdebug_cmnd_count; /* number of incoming commands */
static atomic_t sdebug_completions; /* count of deferred completions */
static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending; /* nonzero while an injected error is pending — TODO confirm */
static atomic_t sdeb_mq_poll_count; /* bumped when mq_poll returns > 0 */
477
/* One row of the SCSI command dispatch tables (opcode_info_arr and the
 * *_iarr overflow arrays below). */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 devsel;		/* device type mask for this definition (DS_*) */
	u32 flags;		/* OR-ed set of F_* flags defined above */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);	/* response handler */
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
490
491/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	/* NB: keep in step with opcode_info_arr[] (sized by the last
	 * element) and with the values stored in opcode_ind_arr[]. */
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_ATOMIC_WRITE_16 = 32,
	SDEB_I_READ_BLOCK_LIMITS = 33,
	SDEB_I_LOCATE = 34,
	SDEB_I_WRITE_FILEMARKS = 35,
	SDEB_I_SPACE = 36,
	SDEB_I_FORMAT_MEDIUM = 37,
	SDEB_I_ERASE = 38,
	SDEB_I_LAST_ELEM_P1 = 39,	/* keep this last (previous + 1) */
};
534
535
/*
 * Map cdb[0] (the SCSI opcode) onto an SDEB_I_* index into
 * opcode_info_arr[]. A zero entry selects SDEB_I_INVALID_OPCODE, except
 * for opcode 0x0 itself (TEST UNIT READY).
 */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    SDEB_I_FORMAT_MEDIUM, SDEB_I_READ_BLOCK_LIMITS, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0,
	    SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE,
	0, SDEB_I_ERASE, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, SDEB_I_LOCATE, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	    0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0,
	SDEB_I_ATOMIC_WRITE_16, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
581
582/*
583 * The following "response" functions return the SCSI mid-level's 4 byte
584 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
585 * command completion, they can mask their return value with
586 * SDEG_RES_IMMED_MASK .
587 */
588#define SDEG_RES_IMMED_MASK 0x40000000
589
590static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
591static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
592static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
593static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
594static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
595static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
596static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
597static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
598static int resp_read_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
599static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
600static int resp_write_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
601static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
602static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
603static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
604static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
605static int resp_get_stream_status(struct scsi_cmnd *scp,
606 struct sdebug_dev_info *devip);
607static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
608static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
609static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
610static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
611static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
612static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
613static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
614static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
615static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
616static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
617static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
618static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
619static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *);
620static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
621static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
622static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
623static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
624static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *);
625static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *);
626static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *);
627static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *);
628static int resp_read_position(struct scsi_cmnd *, struct sdebug_dev_info *);
629static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *);
630static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *);
631static int resp_erase(struct scsi_cmnd *, struct sdebug_dev_info *);
632
633static int sdebug_do_add_host(bool mk_new_store);
634static int sdebug_add_host_helper(int per_host_idx);
635static void sdebug_do_remove_host(bool the_end);
636static int sdebug_add_store(void);
637static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
638static void sdebug_erase_all_stores(bool apart_from_first);
639
/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 * Entry fields are described by struct opcode_info_t; in particular
 * len_mask[] is {cdb_len, per-cdb-byte significant-bit masks...}.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, DS_ALL, F_D_IN, NULL, NULL,	/* MODE SENSE(6) */
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* MODE SELECT(6) */
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) disk */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x8, 0, DS_SSC, F_D_IN | FF_MEDIA_IO, resp_read_tape, NULL, /* READ(6) tape */
	    {6, 0x03, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) disk */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xa, 0, DS_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_tape,     /* WRITE(6) tape */
	    NULL, {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
	{0, 0x9e, 0x16, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
	     0, 0} },	/* GET STREAM STATUS */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
	{0, 0x34, 0, DS_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_read_position, NULL,
	    {10, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },	/* READ POSITION (10) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	/* NOTE(review): pfp is NULL here — presumably dispatched via the
	 * opcode_info_arr[] SDEB_I_ZONE_IN entry; confirm. */
	{0, 0x95, 0x6, DS_NO_SSC, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};
764
765
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES.
 *
 * Entry layout (from usage in this table): count of attached iarr
 * variants, opcode, service action, device-type selector (DS_*), flags
 * (F_* / FF_*), response function, pointer to the variant sub-table,
 * then a per-byte CDB usage mask whose first element is the CDB length.
 * NOTE(review): mask bytes presumably mark which CDB bits may be set -
 * confirm against the opcode_info_t declaration earlier in this file.
 */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} }, /* REPORT LUNS */
	{0, 0x3, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, DS_ALL, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, DS_ALL, F_D_IN, /* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, DS_ALL, F_D_OUT, /* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, DS_NO_SSC, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, DS_NO_SSC, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr, /* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, DS_ALL, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, DS_NO_SSC, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, DS_NO_SSC, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, DS_ALL, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
		0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0, DS_NO_SSC,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, DS_NO_SSC, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, DS_ALL, F_D_OUT,
	    NULL, reserve_iarr, /* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, DS_ALL, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, DS_ALL, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, DS_SSC, 0, resp_rewind, NULL,
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, DS_NO_SSC, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, DS_ALL, F_D_OUT, NULL, NULL, /* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, DS_NO_SSC, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} }, /* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} }, /* SYNC_CACHE (10) */
	{0, 0x89, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} }, /* PRE-FETCH (10) */
	    /* READ POSITION (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* 32 */
	{0, 0x9c, 0x0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
	    resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
		{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
	{0, 0x05, 0, DS_SSC, F_D_IN, resp_read_blklimits, NULL, /* READ BLOCK LIMITS (6) */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x2b, 0, DS_SSC, F_D_UNKN, resp_locate, NULL, /* LOCATE (10) */
	    {10, 0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x10, 0, DS_SSC, F_D_IN, resp_write_filemarks, NULL, /* WRITE FILEMARKS (6) */
	    {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x11, 0, DS_SSC, F_D_IN, resp_space, NULL, /* SPACE (6) */
	    {6, 0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x4, 0, DS_SSC, 0, resp_format_medium, NULL, /* FORMAT MEDIUM (6) */
	    {6, 0x3, 0x7, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x19, 0, DS_SSC, F_D_IN, resp_erase, NULL, /* ERASE (6) */
	    {6, 0x03, 0x33, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 39 */
/* sentinel */
	{0xff, 0, 0, 0, 0, NULL, NULL, /* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
899
900static int sdebug_num_hosts;
901static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
902static int sdebug_ato = DEF_ATO;
903static int sdebug_cdb_len = DEF_CDB_LEN;
904static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
905static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
906static int sdebug_dif = DEF_DIF;
907static int sdebug_dix = DEF_DIX;
908static int sdebug_dsense = DEF_D_SENSE;
909static int sdebug_every_nth = DEF_EVERY_NTH;
910static int sdebug_fake_rw = DEF_FAKE_RW;
911static unsigned int sdebug_guard = DEF_GUARD;
912static int sdebug_host_max_queue; /* per host */
913static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
914static int sdebug_max_luns = DEF_MAX_LUNS;
915static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
916static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
917static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
918static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
919static int sdebug_no_lun_0 = DEF_NO_LUN_0;
920static int sdebug_no_uld;
921static int sdebug_num_parts = DEF_NUM_PARTS;
922static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
923static int sdebug_opt_blks = DEF_OPT_BLKS;
924static int sdebug_opts = DEF_OPTS;
925static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
926static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
927static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
928static int sdebug_scsi_level = DEF_SCSI_LEVEL;
929static int sdebug_sector_size = DEF_SECTOR_SIZE;
930static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
931static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
932static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
933static unsigned int sdebug_lbpu = DEF_LBPU;
934static unsigned int sdebug_lbpws = DEF_LBPWS;
935static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
936static unsigned int sdebug_lbprz = DEF_LBPRZ;
937static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
938static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
939static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
940static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
941static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
942static unsigned int sdebug_atomic_wr = DEF_ATOMIC_WR;
943static unsigned int sdebug_atomic_wr_max_length = DEF_ATOMIC_WR_MAX_LENGTH;
944static unsigned int sdebug_atomic_wr_align = DEF_ATOMIC_WR_ALIGN;
945static unsigned int sdebug_atomic_wr_gran = DEF_ATOMIC_WR_GRAN;
946static unsigned int sdebug_atomic_wr_max_length_bndry =
947 DEF_ATOMIC_WR_MAX_LENGTH_BNDRY;
948static unsigned int sdebug_atomic_wr_max_bndry = DEF_ATOMIC_WR_MAX_BNDRY;
949static int sdebug_uuid_ctl = DEF_UUID_CTL;
950static bool sdebug_random = DEF_RANDOM;
951static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
952static bool sdebug_removable = DEF_REMOVABLE;
953static bool sdebug_clustering;
954static bool sdebug_host_lock = DEF_HOST_LOCK;
955static bool sdebug_strict = DEF_STRICT;
956static bool sdebug_any_injecting_opt;
957static bool sdebug_no_rwlock;
958static bool sdebug_verbose;
959static bool have_dif_prot;
960static bool write_since_sync;
961static bool sdebug_statistics = DEF_STATISTICS;
962static bool sdebug_wp;
963static bool sdebug_allow_restart;
/* Zoned block device emulation model: none, host-aware or host-managed. */
static enum {
	BLK_ZONED_NONE = 0,
	BLK_ZONED_HA = 1,
	BLK_ZONED_HM = 2,
} sdeb_zbc_model = BLK_ZONED_NONE;
/* textual model name; presumably parsed into sdeb_zbc_model at init - verify */
static char *sdeb_zbc_model_s;

/* SAM logical unit addressing methods */
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
/* integer mirror of sdebug_lun_am; NOTE(review): likely the modparam value - confirm */
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
977
978static unsigned int sdebug_store_sectors;
979static sector_t sdebug_capacity; /* in sectors */
980
981/* old BIOS stuff, kernel may get rid of them but some mode sense pages
982 may still need them */
983static int sdebug_heads; /* heads per disk */
984static int sdebug_cylinders_per; /* cylinders per surface */
985static int sdebug_sectors_per; /* sectors per cylinder */
986
987static LIST_HEAD(sdebug_host_list);
988static DEFINE_MUTEX(sdebug_host_list_mutex);
989
990static struct xarray per_store_arr;
991static struct xarray *per_store_ap = &per_store_arr;
992static int sdeb_first_idx = -1; /* invalid index ==> none created */
993static int sdeb_most_recent_idx = -1;
994static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */
995
996static unsigned long map_size;
997static int num_aborts;
998static int num_dev_resets;
999static int num_target_resets;
1000static int num_bus_resets;
1001static int num_host_resets;
1002static int dix_writes;
1003static int dix_reads;
1004static int dif_errors;
1005
1006/* ZBC global data */
1007static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
1008static int sdeb_zbc_zone_cap_mb;
1009static int sdeb_zbc_zone_size_mb;
1010static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
1011static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
1012
1013static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
1014static int poll_queues; /* iouring iopoll interface.*/
1015
1016static atomic_long_t writes_by_group_number[64];
1017
1018static char sdebug_proc_name[] = MY_NAME;
1019static const char *my_name = MY_NAME;
1020
1021static const struct bus_type pseudo_lld_bus;
1022
1023static struct device_driver sdebug_driverfs_driver = {
1024 .name = sdebug_proc_name,
1025 .bus = &pseudo_lld_bus,
1026};
1027
/* Precomputed scsi_cmnd result values returned by the response routines. */
static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
1038
1039static struct dentry *sdebug_debugfs_root;
1040static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);
1041
1042static u32 sdebug_get_devsel(struct scsi_device *sdp)
1043{
1044 unsigned char devtype = sdp->type;
1045 u32 devsel;
1046
1047 if (devtype < 32)
1048 devsel = (1 << devtype);
1049 else
1050 devsel = DS_ALL;
1051
1052 return devsel;
1053}
1054
1055static void sdebug_err_free(struct rcu_head *head)
1056{
1057 struct sdebug_err_inject *inject =
1058 container_of(head, typeof(*inject), rcu);
1059
1060 kfree(inject);
1061}
1062
/*
 * Add an error-injection rule for @sdev, taking ownership of @new.
 * Any existing rule with the same type and command opcode is replaced:
 * the old entry is unlinked under devip->list_lock and freed only after
 * an RCU grace period, so lockless readers never see freed memory.
 */
static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == new->type && err->cmd == new->cmd) {
			/* duplicate rule: unlink now, free after grace period */
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
		}
	}

	list_add_tail_rcu(&new->list, &devip->inject_err_list);
	spin_unlock(&devip->list_lock);
}
1079
/*
 * Handle a "- <type> <cmd>" removal directive written to the debugfs
 * "error" file. Always consumes (kfrees) @buf, on every path. Returns
 * @count when a matching rule was found and scheduled for RCU freeing,
 * -EINVAL on parse failure or when no rule matches.
 */
static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;
	int type;
	unsigned char cmd;

	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
		kfree(buf);
		return -EINVAL;
	}

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == type && err->cmd == cmd) {
			/* unlink under the lock; free after RCU grace period */
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
			spin_unlock(&devip->list_lock);
			kfree(buf);
			return count;
		}
	}
	spin_unlock(&devip->list_lock);

	kfree(buf);
	return -EINVAL;
}
1107
/*
 * debugfs "error" file read side: list the active injection rules, one
 * per line, with a per-type column layout. Walks the RCU-protected list
 * locklessly under rcu_read_lock(). Always returns 0.
 */
static int sdebug_error_show(struct seq_file *m, void *p)
{
	struct scsi_device *sdev = (struct scsi_device *)m->private;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	seq_puts(m, "Type\tCount\tCommand\n");

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		switch (err->type) {
		case ERR_TMOUT_CMD:
		case ERR_ABORT_CMD_FAILED:
		case ERR_LUN_RESET_FAILED:
			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
				err->cmd);
			break;

		case ERR_FAIL_QUEUE_CMD:
			/* extra column: the queuecommand return to inject */
			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
				err->cnt, err->cmd, err->queuecmd_ret);
			break;

		case ERR_FAIL_CMD:
			/* extra columns: host/driver/status bytes + sense */
			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				err->type, err->cnt, err->cmd,
				err->host_byte, err->driver_byte,
				err->status_byte, err->sense_key,
				err->asc, err->asq);
			break;
		}
	}
	rcu_read_unlock();

	return 0;
}
1144
/* debugfs open: bind the seq_file to the scsi_device stored in i_private. */
static int sdebug_error_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_error_show, inode->i_private);
}
1149
1150static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
1151 size_t count, loff_t *ppos)
1152{
1153 char *buf;
1154 unsigned int inject_type;
1155 struct sdebug_err_inject *inject;
1156 struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
1157
1158 buf = memdup_user_nul(ubuf, count);
1159 if (IS_ERR(buf))
1160 return PTR_ERR(buf);
1161
1162 if (buf[0] == '-')
1163 return sdebug_err_remove(sdev, buf, count);
1164
1165 if (sscanf(buf, "%d", &inject_type) != 1) {
1166 kfree(buf);
1167 return -EINVAL;
1168 }
1169
1170 inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
1171 if (!inject) {
1172 kfree(buf);
1173 return -ENOMEM;
1174 }
1175
1176 switch (inject_type) {
1177 case ERR_TMOUT_CMD:
1178 case ERR_ABORT_CMD_FAILED:
1179 case ERR_LUN_RESET_FAILED:
1180 if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
1181 &inject->cmd) != 3)
1182 goto out_error;
1183 break;
1184
1185 case ERR_FAIL_QUEUE_CMD:
1186 if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
1187 &inject->cmd, &inject->queuecmd_ret) != 4)
1188 goto out_error;
1189 break;
1190
1191 case ERR_FAIL_CMD:
1192 if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
1193 &inject->type, &inject->cnt, &inject->cmd,
1194 &inject->host_byte, &inject->driver_byte,
1195 &inject->status_byte, &inject->sense_key,
1196 &inject->asc, &inject->asq) != 9)
1197 goto out_error;
1198 break;
1199
1200 default:
1201 goto out_error;
1202 break;
1203 }
1204
1205 kfree(buf);
1206 sdebug_err_add(sdev, inject);
1207
1208 return count;
1209
1210out_error:
1211 kfree(buf);
1212 kfree(inject);
1213 return -EINVAL;
1214}
1215
/* Per-device debugfs "error" file: read lists rules, write adds/removes. */
static const struct file_operations sdebug_error_fops = {
	.open	= sdebug_error_open,
	.read	= seq_read,
	.write	= sdebug_error_write,
	.release = single_release,
};
1222
1223static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
1224{
1225 struct scsi_target *starget = (struct scsi_target *)m->private;
1226 struct sdebug_target_info *targetip =
1227 (struct sdebug_target_info *)starget->hostdata;
1228
1229 if (targetip)
1230 seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');
1231
1232 return 0;
1233}
1234
/* debugfs open: bind the seq_file to the scsi_target stored in i_private. */
static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}
1239
1240static ssize_t sdebug_target_reset_fail_write(struct file *file,
1241 const char __user *ubuf, size_t count, loff_t *ppos)
1242{
1243 int ret;
1244 struct scsi_target *starget =
1245 (struct scsi_target *)file->f_inode->i_private;
1246 struct sdebug_target_info *targetip =
1247 (struct sdebug_target_info *)starget->hostdata;
1248
1249 if (targetip) {
1250 ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
1251 return ret < 0 ? ret : count;
1252 }
1253 return -ENODEV;
1254}
1255
/* Per-target debugfs "fail_reset" file: toggles target reset failure. */
static const struct file_operations sdebug_target_reset_fail_fops = {
	.open	= sdebug_target_reset_fail_open,
	.read	= seq_read,
	.write	= sdebug_target_reset_fail_write,
	.release = single_release,
};
1262
/*
 * scsi_host_template ->target_alloc: allocate per-target state and its
 * debugfs directory containing the "fail_reset" control file. Waits for
 * any pending async cleanup of a previous incarnation of this target so
 * the debugfs directory name is free for reuse.
 */
static int sdebug_target_alloc(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
	if (!targetip)
		return -ENOMEM;

	/* flush sdebug_tartget_cleanup_async() work for prior targets */
	async_synchronize_full_domain(&sdebug_async_domain);

	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
				sdebug_debugfs_root);

	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
				&sdebug_target_reset_fail_fops);

	starget->hostdata = targetip;

	return 0;
}
1283
/*
 * Async-domain worker: remove the target's debugfs directory and free
 * its per-target state, off the ->target_destroy path.
 * NOTE(review): "tartget" is a long-standing typo; kept because callers
 * reference this name.
 */
static void sdebug_tartget_cleanup_async(void *data, async_cookie_t cookie)
{
	struct sdebug_target_info *targetip = data;

	debugfs_remove(targetip->debugfs_entry);
	kfree(targetip);
}
1291
/*
 * scsi_host_template ->target_destroy: detach the per-target state and
 * defer the debugfs removal + free to the async domain (synchronized
 * against re-allocation in sdebug_target_alloc()).
 */
static void sdebug_target_destroy(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = (struct sdebug_target_info *)starget->hostdata;
	if (targetip) {
		starget->hostdata = NULL;
		async_schedule_domain(sdebug_tartget_cleanup_async, targetip,
				&sdebug_async_domain);
	}
}
1303
1304/* Only do the extra work involved in logical block provisioning if one or
1305 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
1306 * real reads and writes (i.e. not skipping them for speed).
1307 */
1308static inline bool scsi_debug_lbp(void)
1309{
1310 return 0 == sdebug_fake_rw &&
1311 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
1312}
1313
1314static inline bool scsi_debug_atomic_write(void)
1315{
1316 return sdebug_fake_rw == 0 && sdebug_atomic_wr;
1317}
1318
/*
 * Return the address of the first byte of @lba within the fake store.
 * do_div() leaves the remainder in @lba, so LBAs wrap modulo
 * sdebug_store_sectors (the reported capacity may exceed the backing
 * store). A NULL @sip or missing store falls back to store 0 with a
 * one-shot warning.
 */
static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
1331
/* Return the T10 PI tuple for @sector, wrapped modulo the store size. */
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
1339
1340static void sdebug_max_tgts_luns(void)
1341{
1342 struct sdebug_host_info *sdbg_host;
1343 struct Scsi_Host *hpnt;
1344
1345 mutex_lock(&sdebug_host_list_mutex);
1346 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1347 hpnt = sdbg_host->shost;
1348 if ((hpnt->this_id >= 0) &&
1349 (sdebug_num_tgts > hpnt->this_id))
1350 hpnt->max_id = sdebug_num_tgts + 1;
1351 else
1352 hpnt->max_id = sdebug_num_tgts;
1353 /* sdebug_max_luns; */
1354 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
1355 }
1356 mutex_unlock(&sdebug_host_list_mutex);
1357}
1358
/* Whether the invalid field lies in the parameter data or in the CDB */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
/*
 * Build ILLEGAL REQUEST sense: INVALID FIELD IN CDB or IN PARAMETER
 * LIST (chosen by @c_d), with a SENSE KEY SPECIFIC field locating the
 * offending byte (@in_byte) and, when in_bit >= 0, the offending bit.
 * Handles both descriptor (sdebug_dsense set) and fixed sense formats.
 */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense key specific bytes */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;		/* SKSV: field pointer is valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer is valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		/* append a sense key specific descriptor (type 0x2, len 0x6) */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
1401
1402static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
1403{
1404 if (!scp->sense_buffer) {
1405 sdev_printk(KERN_ERR, scp->device,
1406 "%s: sense_buffer is NULL\n", __func__);
1407 return;
1408 }
1409 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1410
1411 scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
1412
1413 if (sdebug_verbose)
1414 sdev_printk(KERN_INFO, scp->device,
1415 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1416 my_name, key, asc, asq);
1417}
1418
/* Sense data that has information fields for tapes */
/*
 * Like mk_sense_buffer() but always uses the fixed sense format and
 * fills the INFORMATION field (@information) plus extra flag bits
 * (@tape_flags, OR-ed into byte 2 above the sense key).
 */
static void mk_sense_info_tape(struct scsi_cmnd *scp, int key, int asc, int asq,
			unsigned int information, unsigned char tape_flags)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, /* sdebug_dsense */ 0, key, asc, asq);
	/* only fixed format so far */

	scp->sense_buffer[0] |= 0x80;	/* valid */
	scp->sense_buffer[2] |= tape_flags;
	put_unaligned_be32(information, &scp->sense_buffer[3]);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}
1442
/* Report ILLEGAL REQUEST with INVALID COMMAND OPERATION CODE. */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
1447
1448static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1449 void __user *arg)
1450{
1451 if (sdebug_verbose) {
1452 if (0x1261 == cmd)
1453 sdev_printk(KERN_INFO, dev,
1454 "%s: BLKFLSBUF [0x1261]\n", __func__);
1455 else if (0x5331 == cmd)
1456 sdev_printk(KERN_INFO, dev,
1457 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1458 __func__);
1459 else
1460 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1461 __func__, cmd);
1462 }
1463 return -EINVAL;
1464 /* return -ENOTTY; // correct return but upsets fdisk */
1465}
1466
1467static void config_cdb_len(struct scsi_device *sdev)
1468{
1469 switch (sdebug_cdb_len) {
1470 case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1471 sdev->use_10_for_rw = false;
1472 sdev->use_16_for_rw = false;
1473 sdev->use_10_for_ms = false;
1474 break;
1475 case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1476 sdev->use_10_for_rw = true;
1477 sdev->use_16_for_rw = false;
1478 sdev->use_10_for_ms = false;
1479 break;
1480 case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1481 sdev->use_10_for_rw = true;
1482 sdev->use_16_for_rw = false;
1483 sdev->use_10_for_ms = true;
1484 break;
1485 case 16:
1486 sdev->use_10_for_rw = false;
1487 sdev->use_16_for_rw = true;
1488 sdev->use_10_for_ms = true;
1489 break;
1490 case 32: /* No knobs to suggest this so same as 16 for now */
1491 sdev->use_10_for_rw = false;
1492 sdev->use_16_for_rw = true;
1493 sdev->use_10_for_ms = true;
1494 break;
1495 default:
1496 pr_warn("unexpected cdb_len=%d, force to 10\n",
1497 sdebug_cdb_len);
1498 sdev->use_10_for_rw = true;
1499 sdev->use_16_for_rw = false;
1500 sdev->use_10_for_ms = false;
1501 sdebug_cdb_len = 10;
1502 break;
1503 }
1504}
1505
/*
 * Re-apply the current sdebug_cdb_len setting to every device on every
 * simulated host (e.g. after the module parameter changes).
 */
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	mutex_unlock(&sdebug_host_list_mutex);
}
1521
/*
 * Clear the pending LUNS_CHANGED unit attention on every LU that shares
 * @devip's target (used for SPC-4 report-once semantics in make_ua()).
 * NOTE(review): the sdbg_host comparison looks redundant - every entry
 * on sdhp's dev_info_list should already belong to that host.
 */
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp = devip->sdbg_host;
	struct sdebug_dev_info *dp;

	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
		if ((devip->sdbg_host == dp->sdbg_host) &&
		    (devip->target == dp->target)) {
			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
}
1534
/*
 * Report the highest-priority pending unit attention for @devip, if
 * any: build the matching sense data, clear that UA bit and return
 * check_condition_result. Returns 0 when no UA is pending. Priority is
 * the lowest set bit in devip->uas_bm.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;	/* description for verbose logging */

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		case SDEBUG_UA_NOT_READY_TO_READY:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_READY_ASC,
					0);
			if (sdebug_verbose)
				cp = "not ready to ready transition/media change";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1626
1627/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1628static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1629 int arr_len)
1630{
1631 int act_len;
1632 struct scsi_data_buffer *sdb = &scp->sdb;
1633
1634 if (!sdb->length)
1635 return 0;
1636 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1637 return DID_ERROR << 16;
1638
1639 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1640 arr, arr_len);
1641 scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1642
1643 return 0;
1644}
1645
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;	/* offset beyond buffer: nothing to do */
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	/* shrink resid only; out-of-order calls must not grow it again */
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
1672
/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	/* empty buffer wins over a direction mismatch: keep check order */
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
1686
1687
1688static char sdebug_inq_vendor_id[9] = "Linux ";
1689static char sdebug_inq_product_id[17] = "scsi_debug ";
1690static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1691/* Use some locally assigned NAAs for SAS addresses. */
1692static const u64 naa3_comp_a = 0x3222222000000000ULL;
1693static const u64 naa3_comp_b = 0x3333333000000000ULL;
1694static const u64 naa3_comp_c = 0x3111111000000000ULL;
1695
/* Device identification VPD page (0x83). Returns number of bytes placed
 * in arr. Emits, in order: a T10 vendor-ID designator, an optional pair of
 * LU + relative-port designators (skipped when dev_id_num < 0, i.e. wlun),
 * then target port NAA-3, target port group, target device NAA-3 and a
 * SCSI name string for the target device.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length (header excluded) */
	num += 4;
	if (dev_id_num >= 0) {	/* no LU designators for well known LUs */
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	/* "naa." + 8 hex digits of naa3_comp_a prefix ... */
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	/* ... followed by 8 hex digits of the target device id */
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* pad to the advertised 24 bytes */
	num += 4;
	return num;
}
1783
/* Canned payload for VPD page 0x84, starting at the page's 4th byte:
 * three 6-byte software interface identifiers.
 */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
		    0x22,0x22,0x22,0x0,0xbb,0x1,
		    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page; returns bytes placed in arr */
static int inquiry_vpd_84(unsigned char *arr)
{
	const int len = sizeof(vpd84_data);

	memcpy(arr, vpd84_data, len);
	return len;
}
1796
/* Append one network-services descriptor (header + NUL-terminated URL
 * padded to a 4 byte multiple) for the Management network addresses VPD
 * page. Returns the number of bytes written at arr.
 */
static int inquiry_vpd_85_entry(unsigned char *arr, unsigned char assoc_svc,
				const char *url)
{
	int olen = strlen(url);
	int plen = (olen + 1 + 3) & ~0x3;	/* NUL terminated, padded */

	arr[0] = assoc_svc;	/* association and service type */
	arr[1] = 0x0;		/* reserved */
	arr[2] = 0x0;
	arr[3] = plen;		/* network address length */
	memcpy(arr + 4, url, olen);
	memset(arr + 4 + olen, 0, plen - olen);	/* NUL + padding */
	return 4 + plen;
}

/* Management network addresses VPD page (0x85). Returns number of bytes
 * placed in arr.
 */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	num += inquiry_vpd_85_entry(arr + num, 0x1,	/* lu, storage config */
				    "https://www.kernel.org/config");
	num += inquiry_vpd_85_entry(arr + num, 0x4,	/* lu, logging */
				    "http://www.kernel.org/log");
	return num;
}
1831
1832/* SCSI ports VPD page */
1833static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1834{
1835 int num = 0;
1836 int port_a, port_b;
1837
1838 port_a = target_dev_id + 1;
1839 port_b = port_a + 1;
1840 arr[num++] = 0x0; /* reserved */
1841 arr[num++] = 0x0; /* reserved */
1842 arr[num++] = 0x0;
1843 arr[num++] = 0x1; /* relative port 1 (primary) */
1844 memset(arr + num, 0, 6);
1845 num += 6;
1846 arr[num++] = 0x0;
1847 arr[num++] = 12; /* length tp descriptor */
1848 /* naa-5 target port identifier (A) */
1849 arr[num++] = 0x61; /* proto=sas, binary */
1850 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1851 arr[num++] = 0x0; /* reserved */
1852 arr[num++] = 0x8; /* length */
1853 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1854 num += 8;
1855 arr[num++] = 0x0; /* reserved */
1856 arr[num++] = 0x0; /* reserved */
1857 arr[num++] = 0x0;
1858 arr[num++] = 0x2; /* relative port 2 (secondary) */
1859 memset(arr + num, 0, 6);
1860 num += 6;
1861 arr[num++] = 0x0;
1862 arr[num++] = 12; /* length tp descriptor */
1863 /* naa-5 target port identifier (B) */
1864 arr[num++] = 0x61; /* proto=sas, binary */
1865 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1866 arr[num++] = 0x0; /* reserved */
1867 arr[num++] = 0x8; /* length */
1868 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1869 num += 8;
1870
1871 return num;
1872}
1873
1874
/* Canned 512-byte payload for the ATA Information VPD page (0x89),
 * starting at the page's 4th byte. NOTE(review): appears to embed a fixed
 * ATA IDENTIFY DEVICE block (signature bytes 0xa5,0x51 at the end) —
 * confirm against ACS before editing individual fields.
 */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1918
1919/* ATA Information VPD page */
1920static int inquiry_vpd_89(unsigned char *arr)
1921{
1922 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1923 return sizeof(vpd89_data);
1924}
1925
1926
/* Template for the Block limits VPD page (0xb0), from the page's 4th byte
 * onward; inquiry_vpd_b0() overwrites the variable fields before returning.
 */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1933
/* Block limits VPD page (SBC-3). Fills in arr (which starts at the page's
 * 4th byte) from the vpdb0_data template plus the module parameters, and
 * returns the fixed page length 0x3c.
 */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length; left 0 (no limit) for small stores */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	if (sdebug_atomic_wr) {
		put_unaligned_be32(sdebug_atomic_wr_max_length, &arr[40]);
		put_unaligned_be32(sdebug_atomic_wr_align, &arr[44]);
		put_unaligned_be32(sdebug_atomic_wr_gran, &arr[48]);
		put_unaligned_be32(sdebug_atomic_wr_max_length_bndry, &arr[52]);
		put_unaligned_be32(sdebug_atomic_wr_max_bndry, &arr[56]);
	}

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}
1986
/* Block device characteristics VPD page (SBC-3). All fields other than
 * rotation rate and form factor are left zero by the memset. Returns the
 * fixed page length 0x3c.
 */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[3] = 5;	/* less than 1.8" */

	return 0x3c;
}
1998
1999/* Logical block provisioning VPD page (SBC-4) */
2000static int inquiry_vpd_b2(unsigned char *arr)
2001{
2002 memset(arr, 0, 0x4);
2003 arr[0] = 0; /* threshold exponent */
2004 if (sdebug_lbpu)
2005 arr[1] = 1 << 7;
2006 if (sdebug_lbpws)
2007 arr[1] |= 1 << 6;
2008 if (sdebug_lbpws10)
2009 arr[1] |= 1 << 5;
2010 if (sdebug_lbprz && scsi_debug_lbp())
2011 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
2012 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
2013 /* minimum_percentage=0; provisioning_type=0 (unknown) */
2014 /* threshold_percentage=0 */
2015 return 0x4;
2016}
2017
/* Zoned block device characteristics VPD page (ZBC mandatory).
 * Returns the fixed page length 0x3c.
 */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	/* Max open zones only limited for host-managed with a max_open set */
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	if (devip->zcap < devip->zsize) {
		/* zone capacity < zone size: report the constant offset */
		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
		put_unaligned_be64(devip->zsize, &arr[20]);
	} else {
		arr[19] = 0;
	}
	return 0x3c;
}
2043
#define SDEBUG_BLE_LEN_AFTER_B4 28 /* thus vpage 32 bytes long */

/* NOTE(review): 6 streams = 5 permanent + 1, presumably — confirm */
enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };

/* Block limits extension VPD page (SBC-4); arrb4 starts at the page's
 * 4th byte. Returns SDEBUG_BLE_LEN_AFTER_B4.
 */
static int inquiry_vpd_b7(unsigned char *arrb4)
{
	memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
	arrb4[1] = 1; /* Reduced stream control support (RSCS) */
	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
	return SDEBUG_BLE_LEN_AFTER_B4;
}
2056
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

/*
 * Respond to INQUIRY: either the standard 96 byte response or, when the
 * EVPD bit is set, one of the VPD pages advertised in page 0x0.
 * Returns 0, check_condition_result, or DID_REQUEUE << 16 when the
 * response buffer cannot be allocated.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	u32 alloc_len, n;
	int ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc, is_tape;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/* device types >= 32 are vendor specific: use the module-wide type */
	if (scp->device->type >= 32) {
		is_disk = (sdebug_ptype == TYPE_DISK);
		is_tape = (sdebug_ptype == TYPE_TAPE);
	} else {
		is_disk = (scp->device->type == TYPE_DISK);
		is_tape = (scp->device->type == TYPE_TAPE);
	}
	is_zbc = devip->zoned;
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = ((scp->device->type >= 32 ?
				sdebug_ptype : scp->device->type) & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id;
		u32 len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		arr[1] = cmd[2];	/* echo back the requested page code */
		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
				arr[n++] = 0xb7;  /* Block limits extension */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			/*
			 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
			 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
			 */
			arr[5] = 0x17;
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);	/* 2 byte page length */
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else if (cmd[2] == 0xb7) { /* block limits extension page */
			arr[3] = inquiry_vpd_b7(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* clamp to page length, allocation length and our array */
		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			  min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa;	/* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (is_tape) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
2225
/* See resp_iec_m_pg() for how this data is manipulated. resp_requests()
 * below also peeks at byte 2 (TEST bit 0x4) and byte 3 (MRIE field).
 */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
2229
2230static int resp_requests(struct scsi_cmnd *scp,
2231 struct sdebug_dev_info *devip)
2232{
2233 unsigned char *cmd = scp->cmnd;
2234 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
2235 bool dsense = !!(cmd[1] & 1);
2236 u32 alloc_len = cmd[4];
2237 u32 len = 18;
2238 int stopped_state = atomic_read(&devip->stopped);
2239
2240 memset(arr, 0, sizeof(arr));
2241 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
2242 if (dsense) {
2243 arr[0] = 0x72;
2244 arr[1] = NOT_READY;
2245 arr[2] = LOGICAL_UNIT_NOT_READY;
2246 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2247 len = 8;
2248 } else {
2249 arr[0] = 0x70;
2250 arr[2] = NOT_READY; /* NO_SENSE in sense_key */
2251 arr[7] = 0xa; /* 18 byte sense buffer */
2252 arr[12] = LOGICAL_UNIT_NOT_READY;
2253 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2254 }
2255 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2256 /* Information exceptions control mode page: TEST=1, MRIE=6 */
2257 if (dsense) {
2258 arr[0] = 0x72;
2259 arr[1] = 0x0; /* NO_SENSE in sense_key */
2260 arr[2] = THRESHOLD_EXCEEDED;
2261 arr[3] = 0xff; /* Failure prediction(false) */
2262 len = 8;
2263 } else {
2264 arr[0] = 0x70;
2265 arr[2] = 0x0; /* NO_SENSE in sense_key */
2266 arr[7] = 0xa; /* 18 byte sense buffer */
2267 arr[12] = THRESHOLD_EXCEEDED;
2268 arr[13] = 0xff; /* Failure prediction(false) */
2269 }
2270 } else { /* nothing to report */
2271 if (dsense) {
2272 len = 8;
2273 memset(arr, 0, len);
2274 arr[0] = 0x72;
2275 } else {
2276 memset(arr, 0, len);
2277 arr[0] = 0x70;
2278 arr[7] = 0xa;
2279 }
2280 }
2281 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2282}
2283
/* START STOP UNIT. Enforces the tur_ms_to_ready delay for a device
 * created in delayed-ready state (stopped == 2) and resets all tape
 * partition positions on a tape start. Returns SDEG_RES_IMMED_MASK when
 * the response needs no delay (state unchanged or IMMED set in the cdb).
 */
static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, want_stop, stopped_state;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {	/* only POWER CONDITION 0 is supported */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	want_stop = !(cmd[4] & 1);	/* START bit clear -> stop */
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {	/* waiting on tur_ms_to_ready timer */
		ktime_t now_ts = ktime_get_boottime();

		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));

			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				stopped_state = 0;
			}
		}
		if (stopped_state == 2) {
			if (want_stop) {
				stopped_state = 1;	/* dummy up success */
			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
				return check_condition_result;
			}
		}
	}
	changing = (stopped_state != want_stop);
	if (changing)
		atomic_xchg(&devip->stopped, want_stop);
	if (scp->device->type == TYPE_TAPE && !want_stop) {
		int i;

		set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */
		/* a load (start) rewinds every partition to BOP */
		for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
			devip->tape_location[i] = 0;
		devip->tape_partition = 0;
	}
	if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
2334
2335static sector_t get_sdebug_capacity(void)
2336{
2337 static const unsigned int gibibyte = 1073741824;
2338
2339 if (sdebug_virtual_gb > 0)
2340 return (sector_t)sdebug_virtual_gb *
2341 (gibibyte / sdebug_sector_size);
2342 else
2343 return sdebug_store_sectors;
2344}
2345
2346#define SDEBUG_READCAP_ARR_SZ 8
2347static int resp_readcap(struct scsi_cmnd *scp,
2348 struct sdebug_dev_info *devip)
2349{
2350 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2351 unsigned int capac;
2352
2353 /* following just in case virtual_gb changed */
2354 sdebug_capacity = get_sdebug_capacity();
2355 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2356 if (sdebug_capacity < 0xffffffff) {
2357 capac = (unsigned int)sdebug_capacity - 1;
2358 put_unaligned_be32(capac, arr + 0);
2359 } else
2360 put_unaligned_be32(0xffffffff, arr + 0);
2361 put_unaligned_be16(sdebug_sector_size, arr + 6);
2362 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2363}
2364
2365#define SDEBUG_READCAP16_ARR_SZ 32
2366static int resp_readcap16(struct scsi_cmnd *scp,
2367 struct sdebug_dev_info *devip)
2368{
2369 unsigned char *cmd = scp->cmnd;
2370 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2371 u32 alloc_len;
2372
2373 alloc_len = get_unaligned_be32(cmd + 10);
2374 /* following just in case virtual_gb changed */
2375 sdebug_capacity = get_sdebug_capacity();
2376 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2377 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2378 put_unaligned_be32(sdebug_sector_size, arr + 8);
2379 arr[13] = sdebug_physblk_exp & 0xf;
2380 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2381
2382 if (scsi_debug_lbp()) {
2383 arr[14] |= 0x80; /* LBPME */
2384 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2385 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2386 * in the wider field maps to 0 in this field.
2387 */
2388 if (sdebug_lbprz & 1) /* precisely what the draft requires */
2389 arr[14] |= 0x40;
2390 }
2391
2392 /*
2393 * Since the scsi_debug READ CAPACITY implementation always reports the
2394 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2395 */
2396 if (devip->zoned)
2397 arr[12] |= 1 << 4;
2398
2399 arr[15] = sdebug_lowest_aligned & 0xff;
2400
2401 if (have_dif_prot) {
2402 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2403 arr[12] |= 1; /* PROT_EN */
2404 }
2405
2406 return fill_from_dev_buffer(scp, arr,
2407 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2408}
2409
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

/* REPORT TARGET PORT GROUPS (SPC-4 MAINTENANCE IN). Builds two one-port
 * groups matching VPD page 0x88, the second marked unavailable.
 */
static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int port_group_a, port_group_b, port_a, port_b;
	u32 alen, n, rlen;
	int ret;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
	    (devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
	    (devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;	/* skip 4 byte header, filled in at the end */
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;	/* RETURN DATA LENGTH excludes the 4 byte header */
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
2488
/* REPORT SUPPORTED OPERATION CODES (SPC-4). reporting_opts 0 lists every
 * supported CDB (each with a timeout descriptor when RCTD is set);
 * 1..3 describe a single opcode (optionally with a service action).
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	u32 devsel = sdebug_get_devsel(scp->device);

	rctd = !!(cmd[2] & 0x80);	/* return command timeout descriptors */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0: /* all commands */
		bump = rctd ? 20 : 8;	/* descriptor size with/without RCTD */
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			if ((devsel & oip->devsel) != 0) {
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa, arr + offset + 8);
				offset += bump;
			}
			/* also emit this entry's attached (same opcode,
			 * different service action) commands
			 */
			na = oip->num_attached;
			r_oip = oip;
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				if ((devsel & oip->devsel) == 0)
					continue;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
				offset += bump;
			}
			oip = r_oip;
		}
		put_unaligned_be32(offset - 4, arr);
		break;
	case 1: /* one command: opcode only */
	case 2: /* one command: opcode plus service action */
	case 3: /* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* command not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    (devsel & oip->devsel) != 0 &&
			    req_opcode == oip->opcode)
				supp = 3;
			else if (0 == (FF_SA & oip->flags)) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode &&
					    (devsel & oip->devsel) != 0)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa &&
					    (devsel & oip->devsel) != 0)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {	/* supported: add usage data */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
2640
2641static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2642 struct sdebug_dev_info *devip)
2643{
2644 bool repd;
2645 u32 alloc_len, len;
2646 u8 arr[16];
2647 u8 *cmd = scp->cmnd;
2648
2649 memset(arr, 0, sizeof(arr));
2650 repd = !!(cmd[2] & 0x80);
2651 alloc_len = get_unaligned_be32(cmd + 6);
2652 if (alloc_len < 4) {
2653 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2654 return check_condition_result;
2655 }
2656 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2657 arr[1] = 0x1; /* ITNRS */
2658 if (repd) {
2659 arr[3] = 0xc;
2660 len = 16;
2661 } else
2662 len = 4;
2663
2664 len = (len < alloc_len) ? len : alloc_len;
2665 return fill_from_dev_buffer(scp, arr, len);
2666}
2667
2668/* <<Following mode page info copied from ST318451LW>> */
2669
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page (0x1) for MODE SENSE */
	static const unsigned char err_recov_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
		5, 0, 0xff, 0xff
	};
	const int pg_len = sizeof(err_recov_pg);

	memcpy(p, err_recov_pg, pg_len);
	if (pcontrol == 1)
		memset(p + 2, 0, pg_len - 2);	/* no changeable fields */
	return pg_len;
}
2682
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page (0x2) for MODE SENSE */
	static const unsigned char disconnect_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0
	};
	const int pg_len = sizeof(disconnect_pg);

	memcpy(p, disconnect_pg, pg_len);
	if (pcontrol == 1)
		memset(p + 2, 0, pg_len - 2);	/* no changeable fields */
	return pg_len;
}
2695
2696static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2697{ /* Format device page for mode_sense */
2698 static const unsigned char format_pg[] = {
2699 0x3, 0x16, 0, 0, 0, 0, 0, 0,
2700 0, 0, 0, 0, 0, 0, 0, 0,
2701 0, 0, 0, 0, 0x40, 0, 0, 0
2702 };
2703
2704 memcpy(p, format_pg, sizeof(format_pg));
2705 put_unaligned_be16(sdebug_sectors_per, p + 10);
2706 put_unaligned_be16(sdebug_sector_size, p + 12);
2707 if (sdebug_removable)
2708 p[20] |= 0x20; /* should agree with INQUIRY */
2709 if (1 == pcontrol)
2710 memset(p + 2, 0, sizeof(format_pg) - 2);
2711 return sizeof(format_pg);
2712}
2713
/*
 * Caching mode page (0x8) "current values". Mutable: resp_caching_pg()
 * may clear WCE per sdebug_opts, and MODE SELECT (resp_mode_select)
 * writes bytes 2.. directly.
 */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2717
2718static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2719{ /* Caching page for mode_sense */
2720 static const unsigned char ch_caching_pg[] = {
2721 /* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2722 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2723 };
2724 static const unsigned char d_caching_pg[] = {
2725 0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2726 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0
2727 };
2728
2729 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2730 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2731 memcpy(p, caching_pg, sizeof(caching_pg));
2732 if (1 == pcontrol)
2733 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2734 else if (2 == pcontrol)
2735 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2736 return sizeof(caching_pg);
2737}
2738
/*
 * Control mode page (0xa) "current values". Mutable: resp_ctrl_m_pg()
 * updates the D_SENSE and ATO bits and MODE SELECT writes bytes 2.. .
 */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2741
2742static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2743{ /* Control mode page for mode_sense */
2744 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2745 0, 0, 0, 0};
2746 static const unsigned char d_ctrl_m_pg[] = {
2747 0xa, 10, 2, 0, 0, 0, 0, 0,
2748 0, 0, 0x2, 0x4b
2749 };
2750
2751 if (sdebug_dsense)
2752 ctrl_m_pg[2] |= 0x4;
2753 else
2754 ctrl_m_pg[2] &= ~0x4;
2755
2756 if (sdebug_ato)
2757 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2758
2759 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2760 if (1 == pcontrol)
2761 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2762 else if (2 == pcontrol)
2763 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2764 return sizeof(ctrl_m_pg);
2765}
2766
/* IO Advice Hints Grouping mode page */
static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
{
	/* IO Advice Hints Grouping mode page (0xa, subpage 0x5) */
	struct grouping_m_pg {
		u8 page_code;	/* OR 0x40 when subpage_code > 0 */
		u8 subpage_code;
		__be16 page_length;
		u8 reserved[12];
		struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
	};
	static const struct grouping_m_pg gr_m_pg = {
		.page_code = 0xa | 0x40,	/* SPF bit set for a subpage */
		.subpage_code = 5,
		.page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
		/* last descriptor left disabled; presumably the "stream
		 * enable" flag per group -- confirm against
		 * scsi_io_group_descriptor's definition */
		.descr = {
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 0 },
		}
	};

	/* keep the on-wire layout assumption honest at compile time */
	BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
		     16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
	memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
	if (1 == pcontrol) {
		/* There are no changeable values so clear from byte 4 on. */
		memset(p + 4, 0, sizeof(gr_m_pg) - 4);
	}
	return sizeof(gr_m_pg);
}
2801
static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Informational Exceptions control mode page for mode_sense */
	/* changeable values shown to the initiator */
	static const unsigned char ch_iec_m_pg[] = {
		/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
		0, 0, 0x0, 0x0
	};
	/* default values */
	static const unsigned char d_iec_m_pg[] = {
		0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
		0, 0, 0x0, 0x0
	};

	/* iec_m_pg is the module-global "current values" copy */
	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
	return sizeof(iec_m_pg);
}
2820
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page (0x19) - short format for MODE SENSE */
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0
	};
	const int pg_len = sizeof(sas_sf_m_pg);

	memcpy(p, sas_sf_m_pg, pg_len);
	if (pcontrol == 1)
		memset(p + 2, 0, pg_len - 2);	/* no changeable fields */
	return pg_len;
}
2832
2833
/*
 * SAS phy control and discover mode subpage (0x19, subpage 0x1) for
 * MODE SENSE. The template describes two phys; SAS addresses and the
 * per-target attached phy identifiers are patched in before copying.
 */
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* patch SAS addresses for both phys (offsets 16/24 and 64/72) */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
2866
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage (0x19/0x2) */
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};
	const int pg_len = sizeof(sas_sha_m_pg);

	memcpy(p, sas_sha_m_pg, pg_len);
	if (pcontrol == 1)
		memset(p + 4, 0, pg_len - 4);	/* nothing changeable past header */
	return pg_len;
}
2879
/*
 * Medium Partition mode page (0x11) "current values" for the emulated
 * tape. Mutable: partition_tape() and process_medium_part_m_pg() update
 * the partition count and sizes in place.
 */
static unsigned char partition_pg[] = {0x11, 12, 1, 0, 0x24, 3, 9, 0,
				       0xff, 0xff, 0x00, 0x00};
2882
2883static int resp_partition_m_pg(unsigned char *p, int pcontrol, int target)
2884{ /* Partition page for mode_sense (tape) */
2885 memcpy(p, partition_pg, sizeof(partition_pg));
2886 if (pcontrol == 1)
2887 memset(p + 2, 0, sizeof(partition_pg) - 2);
2888 return sizeof(partition_pg);
2889}
2890
2891static int process_medium_part_m_pg(struct sdebug_dev_info *devip,
2892 unsigned char *new, int pg_len)
2893{
2894 int new_nbr, p0_size, p1_size;
2895
2896 if ((new[4] & 0x80) != 0) { /* FDP */
2897 partition_pg[4] |= 0x80;
2898 devip->tape_pending_nbr_partitions = TAPE_MAX_PARTITIONS;
2899 devip->tape_pending_part_0_size = TAPE_UNITS - TAPE_PARTITION_1_UNITS;
2900 devip->tape_pending_part_1_size = TAPE_PARTITION_1_UNITS;
2901 } else {
2902 new_nbr = new[3] + 1;
2903 if (new_nbr > TAPE_MAX_PARTITIONS)
2904 return 3;
2905 if ((new[4] & 0x40) != 0) { /* SDP */
2906 p1_size = TAPE_PARTITION_1_UNITS;
2907 p0_size = TAPE_UNITS - p1_size;
2908 if (p0_size < 100)
2909 return 4;
2910 } else if ((new[4] & 0x20) != 0) {
2911 if (new_nbr > 1) {
2912 p0_size = get_unaligned_be16(new + 8);
2913 p1_size = get_unaligned_be16(new + 10);
2914 if (p1_size == 0xFFFF)
2915 p1_size = TAPE_UNITS - p0_size;
2916 else if (p0_size == 0xFFFF)
2917 p0_size = TAPE_UNITS - p1_size;
2918 if (p0_size < 100 || p1_size < 100)
2919 return 8;
2920 } else {
2921 p0_size = TAPE_UNITS;
2922 p1_size = 0;
2923 }
2924 } else
2925 return 6;
2926 devip->tape_pending_nbr_partitions = new_nbr;
2927 devip->tape_pending_part_0_size = p0_size;
2928 devip->tape_pending_part_1_size = p1_size;
2929 partition_pg[3] = new_nbr;
2930 devip->tape_pending_nbr_partitions = new_nbr;
2931 }
2932
2933 return 0;
2934}
2935
static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target,
		unsigned char dce)
{	/* Data Compression mode page (0xf) for MODE SENSE (tape) */
	static const unsigned char compression_pg[] = {
		0x0f, 14, 0x40, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0
	};
	const int pg_len = sizeof(compression_pg);

	memcpy(p, compression_pg, pg_len);
	if (dce)
		p[2] |= 0x80;	/* reflect the device's DCE setting */
	if (pcontrol == 1)
		memset(p + 2, 0, pg_len - 2);	/* changeable: none reported */
	return pg_len;
}
2951
2952/* PAGE_SIZE is more than necessary but provides room for future expansion. */
2953#define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2954
/*
 * MODE SENSE (6 and 10 byte CDBs). Builds the mode parameter header,
 * optional block descriptor and the requested mode page(s) into a
 * temporary buffer that is auto-freed on every return (__free(kfree)).
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	u32 alloc_len, offset, len;
	int target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char *arr __free(kfree);
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;

	arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
	if (!arr)
		return -ENOMEM;
	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);	/* long LBA only in 10-byte CDB */
	is_disk = (scp->device->type == TYPE_DISK);
	is_zbc = devip->zoned;
	is_tape = (scp->device->type == TYPE_TAPE);
	if ((is_disk || is_zbc || is_tape) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	/* mode parameter header differs between the 6 and 10 byte forms */
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		if (is_tape) {
			/* tape short descriptor carries density + block size */
			ap[0] = devip->tape_density;
			put_unaligned_be16(devip->tape_blksize, ap + 6);
		} else
			put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		if (is_tape) {
			/* no long LBA block descriptor for tapes */
			mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, 4);
			return check_condition_result;
		}
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}
	if (cmd[2] == 0)
		goto only_bd; /* Only block descriptor requested */

	/*
	 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
	 * len += resp_*_pg(ap + len, pcontrol, target);
	 */
	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else {
			goto bad_pcode;
		}
		break;
	case 0x8:	/* Caching page, direct access */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else {
			goto bad_pcode;
		}
		break;
	case 0xa:	/* Control Mode page, all devices */
		switch (subpcode) {
		case 0:
			len = resp_ctrl_m_pg(ap, pcontrol, target);
			break;
		case 0x05:
			len = resp_grouping_m_pg(ap, pcontrol, target);
			break;
		case 0xff:
			len = resp_ctrl_m_pg(ap, pcontrol, target);
			len += resp_grouping_m_pg(ap + len, pcontrol, target);
			break;
		default:
			goto bad_subpcode;
		}
		offset += len;
		break;
	case 0xf:	/* Compression Mode Page (tape) */
		if (!is_tape)
			goto bad_pcode;
		len = resp_compression_m_pg(ap, pcontrol, target, devip->tape_dce);
		offset += len;
		break;
	case 0x11:	/* Partition Mode Page (tape) */
		if (!is_tape)
			goto bad_pcode;
		len = resp_partition_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if (subpcode > 0x2 && subpcode < 0xff)
			goto bad_subpcode;
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_err_recov_pg(ap, pcontrol, target);
		len += resp_disconnect_pg(ap + len, pcontrol, target);
		if (is_disk) {
			len += resp_format_pg(ap + len, pcontrol, target);
			len += resp_caching_pg(ap + len, pcontrol, target);
		} else if (is_zbc) {
			len += resp_caching_pg(ap + len, pcontrol, target);
		}
		len += resp_ctrl_m_pg(ap + len, pcontrol, target);
		if (0xff == subpcode)
			len += resp_grouping_m_pg(ap + len, pcontrol, target);
		len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if (0xff == subpcode) {
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		}
		len += resp_iec_m_pg(ap + len, pcontrol, target);
		offset += len;
		break;
	default:
		goto bad_pcode;
	}
only_bd:
	/* fill in the mode data length now that offset is final */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));

bad_pcode:
	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
	return check_condition_result;

bad_subpcode:
	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
	return check_condition_result;
}
3164
3165#define SDEBUG_MAX_MSELECT_SZ 512
3166
/*
 * MODE SELECT (6 and 10 byte CDBs). Fetches the parameter list from the
 * initiator, validates the header and block descriptor, and applies at
 * most one mode page per command to the emulated device state.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* Page Format: must be set */
	sp = cmd[1] & 0x1;	/* Save Pages: not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	off = (mselect6 ? 4 : 8);	/* start of the block descriptor(s) */
	if (scp->device->type == TYPE_TAPE) {
		int blksize;

		/* tapes need exactly one 8-byte block descriptor */
		if (bd_len != 8) {
			mk_sense_invalid_fld(scp, SDEB_IN_DATA,
					mselect6 ? 3 : 6, -1);
			return check_condition_result;
		}
		if (arr[off] == TAPE_BAD_DENSITY) {
			mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
			return check_condition_result;
		}
		blksize = get_unaligned_be16(arr + off + 6);
		/* 0 means variable; otherwise bounded and a multiple of 4 */
		if (blksize != 0 &&
			(blksize < TAPE_MIN_BLKSIZE ||
				blksize > TAPE_MAX_BLKSIZE ||
				(blksize % 4) != 0)) {
			mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, -1);
			return check_condition_result;
		}
		devip->tape_density = arr[off];
		devip->tape_blksize = blksize;
	}
	off += bd_len;
	if (off >= res)
		return 0; /* No page written, just descriptors */
	/* MODE DATA LENGTH is reserved (zero) in a MODE SELECT list */
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);
	if (ps) {	/* PS bit must be zero in MODE SELECT data */
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* SPF: sub-page format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* propagate SWP and D_SENSE into module state */
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0xf:	/* Compression mode page */
		if (scp->device->type != TYPE_TAPE)
			goto bad_pcode;
		if ((arr[off + 2] & 0x40) != 0) {	/* DCC set: accept DCE */
			devip->tape_dce = (arr[off + 2] & 0x80) != 0;
			return 0;
		}
		break;
	case 0x11:	/* Medium Partition Mode Page (tape) */
		if (scp->device->type == TYPE_TAPE) {
			int fld;

			fld = process_medium_part_m_pg(devip, &arr[off], pg_len);
			if (fld == 0)
				return 0;
			mk_sense_invalid_fld(scp, SDEB_IN_DATA, fld, -1);
			return check_condition_result;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;

bad_pcode:
	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
	return check_condition_result;
}
3297
static int resp_temp_l_pg(unsigned char *arr)
{	/* Temperature log page (0xd) payload: two fixed parameters */
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* parameter 0: temperature */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* parameter 1: reference */
	};
	const int pg_len = sizeof(temp_l_pg);

	memcpy(arr, temp_l_pg, pg_len);
	return pg_len;
}
3308
static int resp_ie_l_pg(unsigned char *arr)
{	/* Informational Exceptions log page (0x2f) payload */
	static const unsigned char ie_l_pg[] = {
		0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
	};

	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
		/* simulate a pending informational exception */
		arr[4] = THRESHOLD_EXCEEDED;
		arr[5] = 0xff;
	}
	return sizeof(ie_l_pg);
}
3322
static int resp_env_rep_l_spg(unsigned char *arr)
{	/* Environmental Reporting log subpage (0xd/0x1) payload */
	static const unsigned char env_rep_l_spg[] = {
		/* first report descriptor */
		0x0, 0x0, 0x23, 0x8,
		0x0, 40, 72, 0xff, 45, 18, 0, 0,
		/* second report descriptor */
		0x1, 0x0, 0x23, 0x8,
		0x0, 55, 72, 35, 55, 45, 0, 0,
	};
	const int pg_len = sizeof(env_rep_l_spg);

	memcpy(arr, env_rep_l_spg, pg_len);
	return pg_len;
}
3335
3336#define SDEBUG_MAX_LSENSE_SZ 512
3337
/*
 * LOG SENSE. Supports the supported-pages page (0x0), temperature
 * (0xd, subpages 0x0/0x1) and informational exceptions (0x2f) pages.
 * PPC and SP bits are rejected; saved parameters are not kept.
 */
static int resp_log_sense(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int ppc, sp, pcode, subpcode;
	u32 alloc_len, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	ppc = cmd[1] & 0x2;
	sp = cmd[1] & 0x1;
	if (ppc || sp) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
		return check_condition_result;
	}
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = get_unaligned_be16(cmd + 7);
	arr[0] = pcode;
	if (0 == subpcode) {
		switch (pcode) {
		case 0x0:	/* Supported log pages log page */
			n = 4;
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0xd;		/* Temperature */
			arr[n++] = 0x2f;	/* Informational exceptions */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature log page */
			arr[3] = resp_temp_l_pg(arr + 4);
			break;
		case 0x2f:	/* Informational exceptions log page */
			arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (0xff == subpcode) {
		arr[0] |= 0x40;		/* SPF bit: subpage format */
		arr[1] = subpcode;
		switch (pcode) {
		case 0x0:	/* Supported log pages and subpages log page */
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0x0;		/* 0,0 page */
			arr[n++] = 0x0;
			arr[n++] = 0xff;	/* this page */
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0xd;
			arr[n++] = 0x1;		/* Environment reporting */
			arr[n++] = 0xd;
			arr[n++] = 0xff;	/* all 0xd subpages */
			arr[n++] = 0x2f;
			arr[n++] = 0x0;	/* Informational exceptions */
			arr[n++] = 0x2f;
			arr[n++] = 0xff;	/* all 0x2f subpages */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature subpages */
			n = 4;
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0xd;
			arr[n++] = 0x1;		/* Environment reporting */
			arr[n++] = 0xd;
			arr[n++] = 0xff;	/* these subpages */
			arr[3] = n - 4;
			break;
		case 0x2f:	/* Informational exceptions subpages */
			n = 4;
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[n++] = 0x2f;
			arr[n++] = 0xff;	/* these subpages */
			arr[3] = n - 4;
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (subpcode > 0) {
		arr[0] |= 0x40;
		arr[1] = subpcode;
		if (pcode == 0xd && subpcode == 1)
			arr[3] = resp_env_rep_l_spg(arr + 4);
		else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
	return fill_from_dev_buffer(scp, arr,
		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
}
3437
enum {SDEBUG_READ_BLOCK_LIMITS_ARR_SZ = 6};
/*
 * READ BLOCK LIMITS (tape): report the granularity byte and the
 * maximum/minimum supported block lengths of the emulated drive.
 */
static int resp_read_blklimits(struct scsi_cmnd *scp,
			struct sdebug_dev_info *devip)
{
	unsigned char arr[SDEBUG_READ_BLOCK_LIMITS_ARR_SZ];

	arr[0] = 4;	/* granularity field */
	put_unaligned_be24(TAPE_MAX_BLKSIZE, arr + 1);
	put_unaligned_be16(TAPE_MIN_BLKSIZE, arr + 4);
	return fill_from_dev_buffer(scp, arr, SDEBUG_READ_BLOCK_LIMITS_ARR_SZ);
}
3449
/*
 * LOCATE for tapes: optionally switch partition (CP bit in byte 1), then
 * position to the requested logical object. If end-of-data is reached
 * first, stop there and report BLANK CHECK.
 */
static int resp_locate(struct scsi_cmnd *scp,
		struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int i, pos;
	struct tape_block *blp;
	int partition;

	if ((cmd[1] & 0x02) != 0) {	/* CP: change partition */
		if (cmd[8] >= devip->tape_nbr_partitions) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
			return check_condition_result;
		}
		devip->tape_partition = cmd[8];
	}
	pos = get_unaligned_be32(cmd + 3);
	partition = devip->tape_partition;

	/* walk forward, stopping early if EOD comes before the target */
	for (i = 0, blp = devip->tape_blocks[partition];
	     i < pos && i < devip->tape_eop[partition]; i++, blp++)
		if (IS_TAPE_BLOCK_EOD(blp->fl_size))
			break;
	if (i < pos) {
		/* hit EOD before reaching pos: leave head at EOD */
		devip->tape_location[partition] = i;
		mk_sense_buffer(scp, BLANK_CHECK, 0x05, 0);
		return check_condition_result;
	}
	devip->tape_location[partition] = pos;

	return 0;
}
3481
/*
 * WRITE FILEMARKS (tape): write "count" filemark blocks at the current
 * position and terminate with an EOD marker. Overflowing the partition
 * yields VOLUME OVERFLOW with the EOM flag set.
 */
static int resp_write_filemarks(struct scsi_cmnd *scp,
		struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int i, count, pos;
	u32 data;
	int partition = devip->tape_partition;

	if ((cmd[1] & 0xfe) != 0) { /* probably write setmarks, not in >= SCSI-3 */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		return check_condition_result;
	}
	count = get_unaligned_be24(cmd + 2);
	data = TAPE_BLOCK_FM_FLAG;
	for (i = 0, pos = devip->tape_location[partition]; i < count; i++, pos++) {
		if (pos >= devip->tape_eop[partition] - 1) { /* don't overwrite EOD */
			devip->tape_location[partition] = devip->tape_eop[partition] - 1;
			mk_sense_info_tape(scp, VOLUME_OVERFLOW, NO_ADDITIONAL_SENSE,
					EOP_EOM_DETECTED_ASCQ, count, SENSE_FLAG_EOM);
			return check_condition_result;
		}
		(devip->tape_blocks[partition] + pos)->fl_size = data;
	}
	/* terminate the written sequence with end-of-data */
	(devip->tape_blocks[partition] + pos)->fl_size =
		TAPE_BLOCK_EOD_FLAG;
	devip->tape_location[partition] = pos;

	return 0;
}
3511
/*
 * SPACE for tapes: move over blocks (code 0), filemarks (code 1) or to
 * end-of-data (code 3), forwards or backwards depending on the sign of
 * the 24-bit count. Premature filemark/EOD/BOP/EOP termination reports
 * descriptor sense carrying the residual count.
 */
static int resp_space(struct scsi_cmnd *scp,
		struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd, code;
	int i = 0, pos, count;
	struct tape_block *blp;
	int partition = devip->tape_partition;

	count = get_unaligned_be24(cmd + 2);
	if ((count & 0x800000) != 0) /* extend negative to 32-bit count */
		count |= 0xff000000;
	code = cmd[1] & 0x0f;

	pos = devip->tape_location[partition];
	if (code == 0) { /* blocks */
		if (count < 0) {
			count = (-count);
			pos -= 1;
			for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
			     i++) {
				if (pos < 0)
					goto is_bop;
				else if (IS_TAPE_BLOCK_FM(blp->fl_size))
					goto is_fm;
				if (i > 0) {
					pos--;
					blp--;
				}
			}
		} else if (count > 0) {
			for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
			     i++, pos++, blp++) {
				if (IS_TAPE_BLOCK_EOD(blp->fl_size))
					goto is_eod;
				if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
					pos += 1;
					goto is_fm;
				}
				if (pos >= devip->tape_eop[partition])
					goto is_eop;
			}
		}
	} else if (code == 1) { /* filemarks */
		if (count < 0) {
			count = (-count);
			if (pos == 0)
				goto is_bop;
			else {
				for (i = 0, blp = devip->tape_blocks[partition] + pos;
				     i < count && pos >= 0; i++, pos--, blp--) {
					/* scan backwards for the previous filemark */
					for (pos--, blp-- ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
					     pos >= 0; pos--, blp--)
						; /* empty */
					if (pos < 0)
						goto is_bop;
				}
			}
			pos += 1;
		} else if (count > 0) {
			for (i = 0, blp = devip->tape_blocks[partition] + pos;
			     i < count; i++, pos++, blp++) {
				/* scan forwards to the next filemark or EOD/EOP */
				for ( ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
				     !IS_TAPE_BLOCK_EOD(blp->fl_size) &&
				     pos < devip->tape_eop[partition];
				     pos++, blp++)
					; /* empty */
				if (IS_TAPE_BLOCK_EOD(blp->fl_size))
					goto is_eod;
				if (pos >= devip->tape_eop[partition])
					goto is_eop;
			}
		}
	} else if (code == 3) { /* EOD */
		for (blp = devip->tape_blocks[partition] + pos;
		     !IS_TAPE_BLOCK_EOD(blp->fl_size) && pos < devip->tape_eop[partition];
		     pos++, blp++)
			; /* empty */
		if (pos >= devip->tape_eop[partition])
			goto is_eop;
	} else {
		/* sequential filemarks not supported */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
		return check_condition_result;
	}
	devip->tape_location[partition] = pos;
	return 0;

is_fm:
	devip->tape_location[partition] = pos;
	mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
			FILEMARK_DETECTED_ASCQ, count - i,
			SENSE_FLAG_FILEMARK);
	return check_condition_result;

is_eod:
	devip->tape_location[partition] = pos;
	mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
			EOD_DETECTED_ASCQ, count - i,
			0);
	return check_condition_result;

is_bop:
	devip->tape_location[partition] = 0;
	mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
			BEGINNING_OF_P_M_DETECTED_ASCQ, count - i,
			SENSE_FLAG_EOM);
	/* NOTE(review): duplicate of the assignment above; harmless */
	devip->tape_location[partition] = 0;
	return check_condition_result;

is_eop:
	devip->tape_location[partition] = devip->tape_eop[partition] - 1;
	mk_sense_info_tape(scp, MEDIUM_ERROR, NO_ADDITIONAL_SENSE,
			EOP_EOM_DETECTED_ASCQ, (unsigned int)i,
			SENSE_FLAG_EOM);
	return check_condition_result;
}
3628
3629enum {SDEBUG_READ_POSITION_ARR_SZ = 20};
3630static int resp_read_position(struct scsi_cmnd *scp,
3631 struct sdebug_dev_info *devip)
3632{
3633 u8 *cmd = scp->cmnd;
3634 int all_length;
3635 unsigned char arr[20];
3636 unsigned int pos;
3637
3638 all_length = get_unaligned_be16(cmd + 7);
3639 if ((cmd[1] & 0xfe) != 0 ||
3640 all_length != 0) { /* only short form */
3641 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
3642 all_length ? 7 : 1, 0);
3643 return check_condition_result;
3644 }
3645 memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ);
3646 arr[1] = devip->tape_partition;
3647 pos = devip->tape_location[devip->tape_partition];
3648 put_unaligned_be32(pos, arr + 4);
3649 put_unaligned_be32(pos, arr + 8);
3650 return fill_from_dev_buffer(scp, arr, SDEBUG_READ_POSITION_ARR_SZ);
3651}
3652
/* REWIND: move the current partition back to beginning-of-partition */
static int resp_rewind(struct scsi_cmnd *scp,
		struct sdebug_dev_info *devip)
{
	devip->tape_location[devip->tape_partition] = 0;

	return 0;
}
3660
/*
 * (Re)partition the simulated tape into nbr_partitions with the given
 * sizes (in tape units). Writes EOD at the start of each partition,
 * rewinds all positions, and refreshes the partition mode page.
 * Returns nbr_partitions, or -1 if the sizes exceed the medium.
 */
static int partition_tape(struct sdebug_dev_info *devip, int nbr_partitions,
			int part_0_size, int part_1_size)
{
	int i;

	if (part_0_size + part_1_size > TAPE_UNITS)
		return -1;
	devip->tape_eop[0] = part_0_size;
	devip->tape_blocks[0]->fl_size = TAPE_BLOCK_EOD_FLAG;
	devip->tape_eop[1] = part_1_size;
	/* partition 1 storage starts right after partition 0's blocks */
	devip->tape_blocks[1] = devip->tape_blocks[0] +
			devip->tape_eop[0];
	devip->tape_blocks[1]->fl_size = TAPE_BLOCK_EOD_FLAG;

	for (i = 0 ; i < TAPE_MAX_PARTITIONS; i++)
		devip->tape_location[i] = 0;

	devip->tape_nbr_partitions = nbr_partitions;
	devip->tape_partition = 0;

	/* keep the MODE SENSE partition page in sync */
	partition_pg[3] = nbr_partitions - 1;
	put_unaligned_be16(devip->tape_eop[0], partition_pg + 8);
	put_unaligned_be16(devip->tape_eop[1], partition_pg + 10);

	return nbr_partitions;
}
3687
3688static int resp_format_medium(struct scsi_cmnd *scp,
3689 struct sdebug_dev_info *devip)
3690{
3691 int res = 0;
3692 unsigned char *cmd = scp->cmnd;
3693
3694 if (cmd[2] > 2) {
3695 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 2, -1);
3696 return check_condition_result;
3697 }
3698 if (cmd[2] != 0) {
3699 if (devip->tape_pending_nbr_partitions > 0) {
3700 res = partition_tape(devip,
3701 devip->tape_pending_nbr_partitions,
3702 devip->tape_pending_part_0_size,
3703 devip->tape_pending_part_1_size);
3704 } else
3705 res = partition_tape(devip, devip->tape_nbr_partitions,
3706 devip->tape_eop[0], devip->tape_eop[1]);
3707 } else
3708 res = partition_tape(devip, 1, TAPE_UNITS, 0);
3709 if (res < 0)
3710 return -EINVAL;
3711
3712 devip->tape_pending_nbr_partitions = -1;
3713
3714 return 0;
3715}
3716
3717static int resp_erase(struct scsi_cmnd *scp,
3718 struct sdebug_dev_info *devip)
3719{
3720 int partition = devip->tape_partition;
3721 int pos = devip->tape_location[partition];
3722 struct tape_block *blp;
3723
3724 blp = devip->tape_blocks[partition] + pos;
3725 blp->fl_size = TAPE_BLOCK_EOD_FLAG;
3726
3727 return 0;
3728}
3729
3730static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3731{
3732 return devip->nr_zones != 0;
3733}
3734
3735static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3736 unsigned long long lba)
3737{
3738 u32 zno = lba >> devip->zsize_shift;
3739 struct sdeb_zone_state *zsp;
3740
3741 if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3742 return &devip->zstate[zno];
3743
3744 /*
3745 * If the zone capacity is less than the zone size, adjust for gap
3746 * zones.
3747 */
3748 zno = 2 * zno - devip->nr_conv_zones;
3749 WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
3750 zsp = &devip->zstate[zno];
3751 if (lba >= zsp->z_start + zsp->z_size)
3752 zsp++;
3753 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
3754 return zsp;
3755}
3756
3757static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3758{
3759 return zsp->z_type == ZBC_ZTYPE_CNV;
3760}
3761
3762static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3763{
3764 return zsp->z_type == ZBC_ZTYPE_GAP;
3765}
3766
3767static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3768{
3769 return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3770}
3771
3772static void zbc_close_zone(struct sdebug_dev_info *devip,
3773 struct sdeb_zone_state *zsp)
3774{
3775 enum sdebug_z_cond zc;
3776
3777 if (!zbc_zone_is_seq(zsp))
3778 return;
3779
3780 zc = zsp->z_cond;
3781 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3782 return;
3783
3784 if (zc == ZC2_IMPLICIT_OPEN)
3785 devip->nr_imp_open--;
3786 else
3787 devip->nr_exp_open--;
3788
3789 if (zsp->z_wp == zsp->z_start) {
3790 zsp->z_cond = ZC1_EMPTY;
3791 } else {
3792 zsp->z_cond = ZC4_CLOSED;
3793 devip->nr_closed++;
3794 }
3795}
3796
3797static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3798{
3799 struct sdeb_zone_state *zsp = &devip->zstate[0];
3800 unsigned int i;
3801
3802 for (i = 0; i < devip->nr_zones; i++, zsp++) {
3803 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3804 zbc_close_zone(devip, zsp);
3805 return;
3806 }
3807 }
3808}
3809
3810static void zbc_open_zone(struct sdebug_dev_info *devip,
3811 struct sdeb_zone_state *zsp, bool explicit)
3812{
3813 enum sdebug_z_cond zc;
3814
3815 if (!zbc_zone_is_seq(zsp))
3816 return;
3817
3818 zc = zsp->z_cond;
3819 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3820 (!explicit && zc == ZC2_IMPLICIT_OPEN))
3821 return;
3822
3823 /* Close an implicit open zone if necessary */
3824 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3825 zbc_close_zone(devip, zsp);
3826 else if (devip->max_open &&
3827 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3828 zbc_close_imp_open_zone(devip);
3829
3830 if (zsp->z_cond == ZC4_CLOSED)
3831 devip->nr_closed--;
3832 if (explicit) {
3833 zsp->z_cond = ZC3_EXPLICIT_OPEN;
3834 devip->nr_exp_open++;
3835 } else {
3836 zsp->z_cond = ZC2_IMPLICIT_OPEN;
3837 devip->nr_imp_open++;
3838 }
3839}
3840
3841static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3842 struct sdeb_zone_state *zsp)
3843{
3844 switch (zsp->z_cond) {
3845 case ZC2_IMPLICIT_OPEN:
3846 devip->nr_imp_open--;
3847 break;
3848 case ZC3_EXPLICIT_OPEN:
3849 devip->nr_exp_open--;
3850 break;
3851 default:
3852 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3853 zsp->z_start, zsp->z_cond);
3854 break;
3855 }
3856 zsp->z_cond = ZC5_FULL;
3857}
3858
/*
 * Advance the write pointer(s) after a successful write of @num blocks at
 * @lba. For sequential-write-required zones the write is already known to
 * start at the WP, so a plain increment suffices. For sequential-write-
 * preferred zones the write may start anywhere and span several zones; a
 * zone's WP only ever moves forward.
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (!zbc_zone_is_seq(zsp))
		return;

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);
		return;
	}

	while (num) {
		/* A write not starting at the WP consumes a non-seq resource */
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			/* Write reaches (or passes) the zone end: clamp WP */
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			/* Write extends beyond the current WP */
			n = num;
			zsp->z_wp = end;
		} else {
			/* Entirely below the WP: the WP never moves back */
			n = num;
		}
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);

		num -= n;
		lba += n;
		if (num) {
			/* Remainder continues in the next zone */
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
3900
/*
 * ZBC-specific access checks for a read or write of @num blocks at @lba.
 * Returns 0 when the access is allowed, otherwise sets sense data on @scp
 * and returns check_condition_result. The caller has already validated the
 * range against the device capacity. A permitted write to an empty/closed
 * sequential zone implicitly opens it as a side effect.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp->z_type != zsp_end->z_type) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* Writing into a gap zone is not allowed */
	if (zbc_zone_is_gap(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
				ATTEMPT_ACCESS_GAP);
		return check_condition_result;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		/* ...but the write must not spill into a non-conv zone */
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		/* Explicitly open zones cannot be displaced: fail instead */
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
3975
3976static inline int check_device_access_params
3977 (struct scsi_cmnd *scp, unsigned long long lba,
3978 unsigned int num, bool write)
3979{
3980 struct scsi_device *sdp = scp->device;
3981 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3982
3983 if (lba + num > sdebug_capacity) {
3984 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3985 return check_condition_result;
3986 }
3987 /* transfer length excessive (tie in to block limits VPD page) */
3988 if (num > sdebug_store_sectors) {
3989 /* needs work to find which cdb byte 'num' comes from */
3990 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3991 return check_condition_result;
3992 }
3993 if (write && unlikely(sdebug_wp)) {
3994 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3995 return check_condition_result;
3996 }
3997 if (sdebug_dev_is_zoned(devip))
3998 return check_zbc_access_params(scp, lba, num, write);
3999
4000 return 0;
4001}
4002
4003/*
4004 * Note: if BUG_ON() fires it usually indicates a problem with the parser
4005 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
4006 * that access any of the "stores" in struct sdeb_store_info should call this
4007 * function with bug_if_fake_rw set to true.
4008 */
4009static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
4010 bool bug_if_fake_rw)
4011{
4012 if (sdebug_fake_rw) {
4013 BUG_ON(bug_if_fake_rw); /* See note above */
4014 return NULL;
4015 }
4016 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
4017}
4018
4019static inline void
4020sdeb_read_lock(rwlock_t *lock)
4021{
4022 if (sdebug_no_rwlock)
4023 __acquire(lock);
4024 else
4025 read_lock(lock);
4026}
4027
4028static inline void
4029sdeb_read_unlock(rwlock_t *lock)
4030{
4031 if (sdebug_no_rwlock)
4032 __release(lock);
4033 else
4034 read_unlock(lock);
4035}
4036
4037static inline void
4038sdeb_write_lock(rwlock_t *lock)
4039{
4040 if (sdebug_no_rwlock)
4041 __acquire(lock);
4042 else
4043 write_lock(lock);
4044}
4045
4046static inline void
4047sdeb_write_unlock(rwlock_t *lock)
4048{
4049 if (sdebug_no_rwlock)
4050 __release(lock);
4051 else
4052 write_unlock(lock);
4053}
4054
4055static inline void
4056sdeb_data_read_lock(struct sdeb_store_info *sip)
4057{
4058 BUG_ON(!sip);
4059
4060 sdeb_read_lock(&sip->macc_data_lck);
4061}
4062
4063static inline void
4064sdeb_data_read_unlock(struct sdeb_store_info *sip)
4065{
4066 BUG_ON(!sip);
4067
4068 sdeb_read_unlock(&sip->macc_data_lck);
4069}
4070
4071static inline void
4072sdeb_data_write_lock(struct sdeb_store_info *sip)
4073{
4074 BUG_ON(!sip);
4075
4076 sdeb_write_lock(&sip->macc_data_lck);
4077}
4078
4079static inline void
4080sdeb_data_write_unlock(struct sdeb_store_info *sip)
4081{
4082 BUG_ON(!sip);
4083
4084 sdeb_write_unlock(&sip->macc_data_lck);
4085}
4086
4087static inline void
4088sdeb_data_sector_read_lock(struct sdeb_store_info *sip)
4089{
4090 BUG_ON(!sip);
4091
4092 sdeb_read_lock(&sip->macc_sector_lck);
4093}
4094
4095static inline void
4096sdeb_data_sector_read_unlock(struct sdeb_store_info *sip)
4097{
4098 BUG_ON(!sip);
4099
4100 sdeb_read_unlock(&sip->macc_sector_lck);
4101}
4102
4103static inline void
4104sdeb_data_sector_write_lock(struct sdeb_store_info *sip)
4105{
4106 BUG_ON(!sip);
4107
4108 sdeb_write_lock(&sip->macc_sector_lck);
4109}
4110
4111static inline void
4112sdeb_data_sector_write_unlock(struct sdeb_store_info *sip)
4113{
4114 BUG_ON(!sip);
4115
4116 sdeb_write_unlock(&sip->macc_sector_lck);
4117}
4118
4119/*
4120 * Atomic locking:
4121 * We simplify the atomic model to allow only 1x atomic write and many non-
4122 * atomic reads or writes for all LBAs.
4123
4124 * A RW lock has a similar bahaviour:
4125 * Only 1x writer and many readers.
4126
4127 * So use a RW lock for per-device read and write locking:
4128 * An atomic access grabs the lock as a writer and non-atomic grabs the lock
4129 * as a reader.
4130 */
4131
4132static inline void
4133sdeb_data_lock(struct sdeb_store_info *sip, bool atomic)
4134{
4135 if (atomic)
4136 sdeb_data_write_lock(sip);
4137 else
4138 sdeb_data_read_lock(sip);
4139}
4140
4141static inline void
4142sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic)
4143{
4144 if (atomic)
4145 sdeb_data_write_unlock(sip);
4146 else
4147 sdeb_data_read_unlock(sip);
4148}
4149
4150/* Allow many reads but only 1x write per sector */
4151static inline void
4152sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write)
4153{
4154 if (do_write)
4155 sdeb_data_sector_write_lock(sip);
4156 else
4157 sdeb_data_sector_read_lock(sip);
4158}
4159
4160static inline void
4161sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write)
4162{
4163 if (do_write)
4164 sdeb_data_sector_write_unlock(sip);
4165 else
4166 sdeb_data_sector_read_unlock(sip);
4167}
4168
4169static inline void
4170sdeb_meta_read_lock(struct sdeb_store_info *sip)
4171{
4172 if (sdebug_no_rwlock) {
4173 if (sip)
4174 __acquire(&sip->macc_meta_lck);
4175 else
4176 __acquire(&sdeb_fake_rw_lck);
4177 } else {
4178 if (sip)
4179 read_lock(&sip->macc_meta_lck);
4180 else
4181 read_lock(&sdeb_fake_rw_lck);
4182 }
4183}
4184
4185static inline void
4186sdeb_meta_read_unlock(struct sdeb_store_info *sip)
4187{
4188 if (sdebug_no_rwlock) {
4189 if (sip)
4190 __release(&sip->macc_meta_lck);
4191 else
4192 __release(&sdeb_fake_rw_lck);
4193 } else {
4194 if (sip)
4195 read_unlock(&sip->macc_meta_lck);
4196 else
4197 read_unlock(&sdeb_fake_rw_lck);
4198 }
4199}
4200
4201static inline void
4202sdeb_meta_write_lock(struct sdeb_store_info *sip)
4203{
4204 if (sdebug_no_rwlock) {
4205 if (sip)
4206 __acquire(&sip->macc_meta_lck);
4207 else
4208 __acquire(&sdeb_fake_rw_lck);
4209 } else {
4210 if (sip)
4211 write_lock(&sip->macc_meta_lck);
4212 else
4213 write_lock(&sdeb_fake_rw_lck);
4214 }
4215}
4216
4217static inline void
4218sdeb_meta_write_unlock(struct sdeb_store_info *sip)
4219{
4220 if (sdebug_no_rwlock) {
4221 if (sip)
4222 __release(&sip->macc_meta_lck);
4223 else
4224 __release(&sdeb_fake_rw_lck);
4225 } else {
4226 if (sip)
4227 write_unlock(&sip->macc_meta_lck);
4228 else
4229 write_unlock(&sdeb_fake_rw_lck);
4230 }
4231}
4232
/*
 * Copy @num sectors at @lba between the command's scatter-gather list and
 * the backing store, in the direction given by @do_write. @sg_skip is the
 * byte offset into the SG list, @atomic requests exclusive (atomic-write)
 * store access. The store is circular: the copy wraps past its end.
 * Returns number of bytes copied or -1 if error.
 */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, u8 group_number,
			    bool do_write, bool atomic)
{
	int ret;
	u64 block;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;
	int i, total = 0;

	/*
	 * Even though reads are inherently atomic (in this driver), we expect
	 * the atomic flag only for writes.
	 */
	if (!do_write && atomic)
		return -1;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;

	/* Per-group write statistics (SBC group number from the CDB) */
	if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
		atomic_long_inc(&writes_by_group_number[group_number]);

	fsp = sip->storep;

	/* do_div() reduces lba modulo the store size; remainder is the slot */
	block = do_div(lba, sdebug_store_sectors);

	/* Only allow 1x atomic write or multiple non-atomic writes at any given time */
	sdeb_data_lock(sip, atomic);
	for (i = 0; i < num; i++) {
		/* We shouldn't need to lock for atomic writes, but do it anyway */
		sdeb_data_sector_lock(sip, do_write);
		ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   sdebug_sector_size, sg_skip, do_write);
		sdeb_data_sector_unlock(sip, do_write);
		total += ret;
		/* Short copy: SG list exhausted, stop early */
		if (ret != sdebug_sector_size)
			break;
		sg_skip += sdebug_sector_size;
		if (++block >= sdebug_store_sectors)
			block = 0;
	}
	sdeb_data_unlock(sip, atomic);

	return total;
}
4291
4292/* Returns number of bytes copied or -1 if error. */
4293static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
4294{
4295 struct scsi_data_buffer *sdb = &scp->sdb;
4296
4297 if (!sdb->length)
4298 return 0;
4299 if (scp->sc_data_direction != DMA_TO_DEVICE)
4300 return -1;
4301 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
4302 num * sdebug_sector_size, 0, true);
4303}
4304
4305/* If sip->storep+lba compares equal to arr(num), then copy top half of
4306 * arr into sip->storep+lba and return true. If comparison fails then
4307 * return false. */
4308static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
4309 const u8 *arr, bool compare_only)
4310{
4311 bool res;
4312 u64 block, rest = 0;
4313 u32 store_blks = sdebug_store_sectors;
4314 u32 lb_size = sdebug_sector_size;
4315 u8 *fsp = sip->storep;
4316
4317 block = do_div(lba, store_blks);
4318 if (block + num > store_blks)
4319 rest = block + num - store_blks;
4320
4321 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
4322 if (!res)
4323 return res;
4324 if (rest)
4325 res = memcmp(fsp, arr + ((num - rest) * lb_size),
4326 rest * lb_size);
4327 if (!res)
4328 return res;
4329 if (compare_only)
4330 return true;
4331 arr += num * lb_size;
4332 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
4333 if (rest)
4334 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
4335 return res;
4336}
4337
4338static __be16 dif_compute_csum(const void *buf, int len)
4339{
4340 __be16 csum;
4341
4342 if (sdebug_guard)
4343 csum = (__force __be16)ip_compute_csum(buf, len);
4344 else
4345 csum = cpu_to_be16(crc_t10dif(buf, len));
4346
4347 return csum;
4348}
4349
4350static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
4351 sector_t sector, u32 ei_lba)
4352{
4353 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
4354
4355 if (sdt->guard_tag != csum) {
4356 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
4357 (unsigned long)sector,
4358 be16_to_cpu(sdt->guard_tag),
4359 be16_to_cpu(csum));
4360 return 0x01;
4361 }
4362 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
4363 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
4364 pr_err("REF check failed on sector %lu\n",
4365 (unsigned long)sector);
4366 return 0x03;
4367 }
4368 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4369 be32_to_cpu(sdt->ref_tag) != ei_lba) {
4370 pr_err("REF check failed on sector %lu\n",
4371 (unsigned long)sector);
4372 return 0x03;
4373 }
4374 return 0;
4375}
4376
/*
 * Copy protection information (t10_pi_tuple per sector) for @sectors
 * sectors starting at @sector between the command's protection SG list and
 * the dif store. @read true copies store->sgl (READ path); false copies
 * sgl->store (WRITE path). The dif store is circular, so a run may wrap
 * back to its beginning.
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		/* 'rest' bytes wrap past the end of the circular dif store */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* Wrapped part continues at the start of the store */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
4422
/*
 * Verify the stored protection information for @sectors sectors starting
 * at @start_sec, then copy it to the command's protection SG list.
 * Returns 0 on success or the dif_verify() code (1 = guard tag,
 * 3 = reference tag) of the first failing sector.
 */
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	int ret = 0;
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		/* An all-ones app tag means "PI not written": skip checking */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		/*
		 * Because scsi_debug acts as both initiator and
		 * target we proceed to verify the PI even if
		 * RDPROTECT=3. This is done so the "initiator" knows
		 * which type of error to return. Otherwise we would
		 * have to iterate over the PI twice.
		 */
		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
			ret = dif_verify(sdt, lba2fake_store(sip, sector),
					 sector, ei_lba);
			if (ret) {
				dif_errors++;
				break;
			}
		}
	}

	/* Copy the PI out even on error; the caller decides what to report */
	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return ret;
}
4462
/*
 * READ(6) for tape devices: read @num blocks (fixed mode) or a single block
 * of up to 'transfer' bytes (variable mode) from the current position of
 * the active partition. Filemarks, end-of-data and end-of-partition all
 * terminate the read with CHECK CONDITION plus descriptive sense data, and
 * leave the tape position per SSC rules.
 */
static int resp_read_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u32 i, num, transfer, size;
	u8 *cmd = scp->cmnd;
	struct scsi_data_buffer *sdb = &scp->sdb;
	int partition = devip->tape_partition;
	u32 pos = devip->tape_location[partition];
	struct tape_block *blp;
	bool fixed, sili;

	if (cmd[0] != READ_6) { /* Only Read(6) supported */
		mk_sense_invalid_opcode(scp);
		return illegal_condition_result;
	}
	fixed = (cmd[1] & 0x1) != 0;	/* fixed-block mode */
	sili = (cmd[1] & 0x2) != 0;	/* suppress incorrect-length indicator */
	/* SILI is only meaningful in variable mode; both bits set is invalid */
	if (fixed && sili) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		return check_condition_result;
	}

	transfer = get_unaligned_be24(cmd + 2);
	if (fixed) {
		num = transfer;
		size = devip->tape_blksize;
	} else {
		if (transfer < TAPE_MIN_BLKSIZE ||
			transfer > TAPE_MAX_BLKSIZE) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			return check_condition_result;
		}
		num = 1;
		size = transfer;
	}

	for (i = 0, blp = devip->tape_blocks[partition] + pos;
	     i < num && pos < devip->tape_eop[partition];
	     i++, pos++, blp++) {
		devip->tape_location[partition] = pos + 1;
		if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
			/* Filemark: stop; position ends up past the mark */
			mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
					FILEMARK_DETECTED_ASCQ, fixed ? num - i : size,
					SENSE_FLAG_FILEMARK);
			scsi_set_resid(scp, (num - i) * size);
			return check_condition_result;
		}
		/* Assume no REW */
		if (IS_TAPE_BLOCK_EOD(blp->fl_size)) {
			/* End-of-data: BLANK CHECK; position stays at EOD */
			mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
					EOD_DETECTED_ASCQ, fixed ? num - i : size,
					0);
			devip->tape_location[partition] = pos;
			scsi_set_resid(scp, (num - i) * size);
			return check_condition_result;
		}
		sg_zero_buffer(sdb->table.sgl, sdb->table.nents,
			size, i * size);
		sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			&(blp->data), 4, i * size, false);
		if (fixed) {
			/* Fixed mode: every block must match the set blksize */
			if (blp->fl_size != devip->tape_blksize) {
				scsi_set_resid(scp, (num - i) * size);
				mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
						0, num - i,
						SENSE_FLAG_ILI);
				return check_condition_result;
			}
		} else {
			/* Variable mode: report length mismatch unless SILI */
			if (blp->fl_size != size) {
				if (blp->fl_size < size)
					scsi_set_resid(scp, size - blp->fl_size);
				if (!sili) {
					mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
							0, size - blp->fl_size,
							SENSE_FLAG_ILI);
					return check_condition_result;
				}
			}
		}
	}
	if (pos >= devip->tape_eop[partition]) {
		/* Ran into end-of-partition: report end-of-medium */
		mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
				EOP_EOM_DETECTED_ASCQ, fixed ? num - i : size,
				SENSE_FLAG_EOM);
		devip->tape_location[partition] = pos - 1;
		return check_condition_result;
	}
	devip->tape_location[partition] = pos;

	return 0;
}
4554
/*
 * Handle the READ(6/10/12/16/32) family for disk-like devices: decode the
 * CDB variant, validate protection and access parameters, optionally
 * inject configured errors, verify DIX/DIF protection data, and copy the
 * requested blocks from the store into the command's SG list.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;
	bool meta_data_locked = false;

	/* Decode LBA, transfer length and (READ 32) expected initial LBA tag */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
			(u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* Type 2 forbids RDPROTECT bits in non-32-byte CDBs */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* Error injection: halve the transfer to simulate a short read */
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	/*
	 * When checking device access params, for reads we only check data
	 * versus what is set at init time, so no need to lock.
	 */
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* Error injection: unrecoverable medium error inside a given range */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	/* Zone state and the dif store are guarded by the metadata lock */
	if (sdebug_dev_is_zoned(devip) ||
	    (sdebug_dix && scsi_prot_sg_count(scp)))  {
		sdeb_meta_read_lock(sip);
		meta_data_locked = true;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_read(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_meta_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_meta_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_meta_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_meta_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, 0, false, false);
	if (meta_data_locked)
		sdeb_meta_read_unlock(sip);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* Error injection: recovered / DIF / DIX errors after a clean read */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
4707
/*
 * Verify the protection information accompanying a DIX write and, when all
 * tuples pass, commit it to the dif store. Walks the protection and data
 * SG lists in lockstep, one t10_pi_tuple per sector. Returns 0 on success
 * or the dif_verify() code of the first failing sector.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
		       scsi_prot_sg_count(SCpnt),
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		/* Running out of data pages before PI pages is a caller bug */
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			/* WRPROTECT=3 means the target skips verification */
			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
				ret = dif_verify(sdt, daddr, sector, ei_lba);
				if (ret)
					goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* All tuples verified: persist the PI alongside the data */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
4779
4780static unsigned long lba_to_map_index(sector_t lba)
4781{
4782 if (sdebug_unmap_alignment)
4783 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
4784 sector_div(lba, sdebug_unmap_granularity);
4785 return lba;
4786}
4787
4788static sector_t map_index_to_lba(unsigned long index)
4789{
4790 sector_t lba = index * sdebug_unmap_granularity;
4791
4792 if (sdebug_unmap_alignment)
4793 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
4794 return lba;
4795}
4796
4797static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
4798 unsigned int *num)
4799{
4800 sector_t end;
4801 unsigned int mapped;
4802 unsigned long index;
4803 unsigned long next;
4804
4805 index = lba_to_map_index(lba);
4806 mapped = test_bit(index, sip->map_storep);
4807
4808 if (mapped)
4809 next = find_next_zero_bit(sip->map_storep, map_size, index);
4810 else
4811 next = find_next_bit(sip->map_storep, map_size, index);
4812
4813 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
4814 *num = end - lba;
4815 return mapped;
4816}
4817
4818static void map_region(struct sdeb_store_info *sip, sector_t lba,
4819 unsigned int len)
4820{
4821 sector_t end = lba + len;
4822
4823 while (lba < end) {
4824 unsigned long index = lba_to_map_index(lba);
4825
4826 if (index < map_size)
4827 set_bit(index, sip->map_storep);
4828
4829 lba = map_index_to_lba(index + 1);
4830 }
4831}
4832
4833static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
4834 unsigned int len)
4835{
4836 sector_t end = lba + len;
4837 u8 *fsp = sip->storep;
4838
4839 while (lba < end) {
4840 unsigned long index = lba_to_map_index(lba);
4841
4842 if (lba == map_index_to_lba(index) &&
4843 lba + sdebug_unmap_granularity <= end &&
4844 index < map_size) {
4845 clear_bit(index, sip->map_storep);
4846 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
4847 memset(fsp + lba * sdebug_sector_size,
4848 (sdebug_lbprz & 1) ? 0 : 0xff,
4849 sdebug_sector_size *
4850 sdebug_unmap_granularity);
4851 }
4852 if (sip->dif_storep) {
4853 memset(sip->dif_storep + lba, 0xff,
4854 sizeof(*sip->dif_storep) *
4855 sdebug_unmap_granularity);
4856 }
4857 }
4858 lba = map_index_to_lba(index + 1);
4859 }
4860}
4861
/*
 * Respond to WRITE(6) for the simulated tape device: append blocks at the
 * current position of the current partition, then write an EOD marker.
 * Returns 0, or a check/illegal condition result with sense data set.
 */
static int resp_write_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u32 i, num, transfer, size, written = 0;
	u8 *cmd = scp->cmnd;
	struct scsi_data_buffer *sdb = &scp->sdb;
	int partition = devip->tape_partition;
	int pos = devip->tape_location[partition];
	struct tape_block *blp;
	bool fixed, ew;

	if (cmd[0] != WRITE_6) { /* Only Write(6) supported */
		mk_sense_invalid_opcode(scp);
		return illegal_condition_result;
	}

	/* FIXED bit: transfer is a block count; else it is a byte length */
	fixed = (cmd[1] & 1) != 0;
	transfer = get_unaligned_be24(cmd + 2);
	if (fixed) {
		num = transfer;
		size = devip->tape_blksize;
	} else {
		/* variable-length block size must be within device limits */
		if (transfer < TAPE_MIN_BLKSIZE ||
			transfer > TAPE_MAX_BLKSIZE) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			return check_condition_result;
		}
		num = 1;
		size = transfer;
	}

	scsi_set_resid(scp, num * transfer);
	/* Append blocks until done or one short of end-of-partition */
	for (i = 0, blp = devip->tape_blocks[partition] + pos, ew = false;
	     i < num && pos < devip->tape_eop[partition] - 1; i++, pos++, blp++) {
		blp->fl_size = size;
		/*
		 * NOTE(review): only 4 bytes of each logical block are copied
		 * into the simulated tape block — presumably the simulation
		 * keeps just a data sample per block; confirm against
		 * struct tape_block's definition.
		 */
		sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			&(blp->data), 4, i * size, true);
		written += size;
		scsi_set_resid(scp, num * transfer - written);
		/* latch early-warning if we crossed the EW position */
		ew |= (pos == devip->tape_eop[partition] - TAPE_EW);
	}

	devip->tape_location[partition] = pos;
	/* terminate the written area with an end-of-data marker */
	blp->fl_size = TAPE_BLOCK_EOD_FLAG;
	if (pos >= devip->tape_eop[partition] - 1) {
		/* hit end-of-partition: report how much was NOT written */
		mk_sense_info_tape(scp, VOLUME_OVERFLOW,
				NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
				fixed ? num - i : transfer,
				SENSE_FLAG_EOM);
		return check_condition_result;
	}
	if (ew) { /* early warning */
		mk_sense_info_tape(scp, NO_SENSE,
				NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
				fixed ? num - i : transfer,
				SENSE_FLAG_EOM);
		return check_condition_result;
	}

	return 0;
}
4922
4923static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4924{
4925 bool check_prot;
4926 u32 num;
4927 u8 group = 0;
4928 u32 ei_lba;
4929 int ret;
4930 u64 lba;
4931 struct sdeb_store_info *sip = devip2sip(devip, true);
4932 u8 *cmd = scp->cmnd;
4933 bool meta_data_locked = false;
4934
4935 switch (cmd[0]) {
4936 case WRITE_16:
4937 ei_lba = 0;
4938 lba = get_unaligned_be64(cmd + 2);
4939 num = get_unaligned_be32(cmd + 10);
4940 group = cmd[14] & 0x3f;
4941 check_prot = true;
4942 break;
4943 case WRITE_10:
4944 ei_lba = 0;
4945 lba = get_unaligned_be32(cmd + 2);
4946 group = cmd[6] & 0x3f;
4947 num = get_unaligned_be16(cmd + 7);
4948 check_prot = true;
4949 break;
4950 case WRITE_6:
4951 ei_lba = 0;
4952 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
4953 (u32)(cmd[1] & 0x1f) << 16;
4954 num = (0 == cmd[4]) ? 256 : cmd[4];
4955 check_prot = true;
4956 break;
4957 case WRITE_12:
4958 ei_lba = 0;
4959 lba = get_unaligned_be32(cmd + 2);
4960 num = get_unaligned_be32(cmd + 6);
4961 group = cmd[6] & 0x3f;
4962 check_prot = true;
4963 break;
4964 case 0x53: /* XDWRITEREAD(10) */
4965 ei_lba = 0;
4966 lba = get_unaligned_be32(cmd + 2);
4967 group = cmd[6] & 0x1f;
4968 num = get_unaligned_be16(cmd + 7);
4969 check_prot = false;
4970 break;
4971 default: /* assume WRITE(32) */
4972 group = cmd[6] & 0x3f;
4973 lba = get_unaligned_be64(cmd + 12);
4974 ei_lba = get_unaligned_be32(cmd + 20);
4975 num = get_unaligned_be32(cmd + 28);
4976 check_prot = false;
4977 break;
4978 }
4979 if (unlikely(have_dif_prot && check_prot)) {
4980 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4981 (cmd[1] & 0xe0)) {
4982 mk_sense_invalid_opcode(scp);
4983 return check_condition_result;
4984 }
4985 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4986 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4987 (cmd[1] & 0xe0) == 0)
4988 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4989 "to DIF device\n");
4990 }
4991
4992 if (sdebug_dev_is_zoned(devip) ||
4993 (sdebug_dix && scsi_prot_sg_count(scp)) ||
4994 scsi_debug_lbp()) {
4995 sdeb_meta_write_lock(sip);
4996 meta_data_locked = true;
4997 }
4998
4999 ret = check_device_access_params(scp, lba, num, true);
5000 if (ret) {
5001 if (meta_data_locked)
5002 sdeb_meta_write_unlock(sip);
5003 return ret;
5004 }
5005
5006 /* DIX + T10 DIF */
5007 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
5008 switch (prot_verify_write(scp, lba, num, ei_lba)) {
5009 case 1: /* Guard tag error */
5010 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
5011 sdeb_meta_write_unlock(sip);
5012 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
5013 return illegal_condition_result;
5014 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
5015 sdeb_meta_write_unlock(sip);
5016 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
5017 return check_condition_result;
5018 }
5019 break;
5020 case 3: /* Reference tag error */
5021 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
5022 sdeb_meta_write_unlock(sip);
5023 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
5024 return illegal_condition_result;
5025 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
5026 sdeb_meta_write_unlock(sip);
5027 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
5028 return check_condition_result;
5029 }
5030 break;
5031 }
5032 }
5033
5034 ret = do_device_access(sip, scp, 0, lba, num, group, true, false);
5035 if (unlikely(scsi_debug_lbp()))
5036 map_region(sip, lba, num);
5037
5038 /* If ZBC zone then bump its write pointer */
5039 if (sdebug_dev_is_zoned(devip))
5040 zbc_inc_wp(devip, lba, num);
5041 if (meta_data_locked)
5042 sdeb_meta_write_unlock(sip);
5043
5044 if (unlikely(-1 == ret))
5045 return DID_ERROR << 16;
5046 else if (unlikely(sdebug_verbose &&
5047 (ret < (num * sdebug_sector_size))))
5048 sdev_printk(KERN_INFO, scp->device,
5049 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
5050 my_name, num * sdebug_sector_size, ret);
5051
5052 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
5053 atomic_read(&sdeb_inject_pending))) {
5054 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
5055 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
5056 atomic_set(&sdeb_inject_pending, 0);
5057 return check_condition_result;
5058 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
5059 /* Logical block guard check failed */
5060 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
5061 atomic_set(&sdeb_inject_pending, 0);
5062 return illegal_condition_result;
5063 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
5064 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
5065 atomic_set(&sdeb_inject_pending, 0);
5066 return illegal_condition_result;
5067 }
5068 }
5069 return 0;
5070}
5071
5072/*
5073 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
5074 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
5075 */
/*
 * Respond to WRITE SCATTERED(16/32): fetch the parameter list (header plus
 * LBA range descriptors plus data) into a kernel buffer, then apply each
 * range descriptor's write in turn under the metadata lock.
 * Returns 0, SCSI_MLQUEUE_HOST_BUSY, DID_ERROR << 16, or a check/illegal
 * condition result with sense data set.
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* fetched header + LBA range descriptors */
	u8 *up;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	u8 group;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {	/* WRITE SCATTERED(32) */
		is_16 = false;
		group = cmd[6] & 0x3f;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {        /* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		group = cmd[14] & 0x3f;
		if (unlikely(have_dif_prot)) {
			/* type 2 protection rejects non-zero WRPROTECT */
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	/* the descriptor area occupies lbdof logical blocks of the dout */
	lbdof_blen = lbdof * lb_size;
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	/* Just keep it simple and always lock for now */
	sdeb_meta_write_lock(sip);
	sg_off = lbdof_blen;	/* data for the first range starts here */
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	/* walk the LBA range descriptors; first one is after the header */
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;	/* empty descriptor: not an error */
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
					"%s: %s: sum of blocks > data provided\n",
					my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		/*
		 * Write ranges atomically to keep as close to pre-atomic
		 * writes behaviour as possible.
		 */
		ret = do_device_access(sip, scp, sg_off, lba, num, group, true, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* optional error injection: recovered / DIF / DIX errors */
		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	sdeb_meta_write_unlock(sip);
err_out:
	kfree(lrdp);
	return ret;
}
5245
/*
 * Common worker for WRITE SAME(10/16): either unmap the range (when the
 * UNMAP bit is set and LBP is active) or replicate one logical block —
 * zeroes if @ndob, else the fetched data-out block — across @num blocks.
 * Metadata lock is only taken when zones or LBP are involved; the data
 * lock covers the actual store writes.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	unsigned long long i;
	u64 block, lbaa;
	u32 lb_size = sdebug_sector_size;
	int ret;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	u8 *fs1p;
	u8 *fsp;
	bool meta_data_locked = false;

	if (sdebug_dev_is_zoned(devip) || scsi_debug_lbp()) {
		sdeb_meta_write_lock(sip);
		meta_data_locked = true;
	}

	ret = check_device_access_params(scp, lba, num, true);
	if (ret)
		goto out;

	if (unmap && scsi_debug_lbp()) {
		/* UNMAP bit set: deallocate instead of writing */
		unmap_region(sip, lba, num);
		goto out;
	}
	lbaa = lba;
	block = do_div(lbaa, sdebug_store_sectors);
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	fsp = sip->storep;
	fs1p = fsp + (block * lb_size);
	sdeb_data_write_lock(sip);
	if (ndob) {
		memset(fs1p, 0, lb_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);

	if (-1 == ret) {
		ret = DID_ERROR << 16;
		goto out;
	} else if (sdebug_verbose && !ndob && (ret < lb_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same", lb_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++) {
		lbaa = lba + i;
		block = do_div(lbaa, sdebug_store_sectors);
		memmove(fsp + (block * lb_size), fs1p, lb_size);
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
	sdeb_data_write_unlock(sip);
	ret = 0;
out:
	if (meta_data_locked)
		sdeb_meta_write_unlock(sip);
	return ret;
}
5312
5313static int resp_write_same_10(struct scsi_cmnd *scp,
5314 struct sdebug_dev_info *devip)
5315{
5316 u8 *cmd = scp->cmnd;
5317 u32 lba;
5318 u16 num;
5319 u32 ei_lba = 0;
5320 bool unmap = false;
5321
5322 if (cmd[1] & 0x8) {
5323 if (sdebug_lbpws10 == 0) {
5324 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
5325 return check_condition_result;
5326 } else
5327 unmap = true;
5328 }
5329 lba = get_unaligned_be32(cmd + 2);
5330 num = get_unaligned_be16(cmd + 7);
5331 if (num > sdebug_write_same_length) {
5332 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
5333 return check_condition_result;
5334 }
5335 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
5336}
5337
5338static int resp_write_same_16(struct scsi_cmnd *scp,
5339 struct sdebug_dev_info *devip)
5340{
5341 u8 *cmd = scp->cmnd;
5342 u64 lba;
5343 u32 num;
5344 u32 ei_lba = 0;
5345 bool unmap = false;
5346 bool ndob = false;
5347
5348 if (cmd[1] & 0x8) { /* UNMAP */
5349 if (sdebug_lbpws == 0) {
5350 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
5351 return check_condition_result;
5352 } else
5353 unmap = true;
5354 }
5355 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
5356 ndob = true;
5357 lba = get_unaligned_be64(cmd + 2);
5358 num = get_unaligned_be32(cmd + 10);
5359 if (num > sdebug_write_same_length) {
5360 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
5361 return check_condition_result;
5362 }
5363 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
5364}
5365
5366/* Note the mode field is in the same position as the (lower) service action
5367 * field. For the Report supported operation codes command, SPC-4 suggests
5368 * each mode of this command should be reported separately; for future. */
5369static int resp_write_buffer(struct scsi_cmnd *scp,
5370 struct sdebug_dev_info *devip)
5371{
5372 u8 *cmd = scp->cmnd;
5373 struct scsi_device *sdp = scp->device;
5374 struct sdebug_dev_info *dp;
5375 u8 mode;
5376
5377 mode = cmd[1] & 0x1f;
5378 switch (mode) {
5379 case 0x4: /* download microcode (MC) and activate (ACT) */
5380 /* set UAs on this device only */
5381 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5382 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
5383 break;
5384 case 0x5: /* download MC, save and ACT */
5385 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
5386 break;
5387 case 0x6: /* download MC with offsets and ACT */
5388 /* set UAs on most devices (LUs) in this target */
5389 list_for_each_entry(dp,
5390 &devip->sdbg_host->dev_info_list,
5391 dev_list)
5392 if (dp->target == sdp->id) {
5393 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
5394 if (devip != dp)
5395 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
5396 dp->uas_bm);
5397 }
5398 break;
5399 case 0x7: /* download MC with offsets, save, and ACT */
5400 /* set UA on all devices (LUs) in this target */
5401 list_for_each_entry(dp,
5402 &devip->sdbg_host->dev_info_list,
5403 dev_list)
5404 if (dp->target == sdp->id)
5405 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
5406 dp->uas_bm);
5407 break;
5408 default:
5409 /* do nothing for this command for other mode values */
5410 break;
5411 }
5412 return 0;
5413}
5414
/*
 * Respond to COMPARE AND WRITE: fetch 2*num blocks of data-out (the compare
 * half followed by the write half), compare the first half against the
 * store and, if it matches, write the second half in its place. Both data
 * and metadata locks are held across the compare+write so it is atomic
 * with respect to other commands.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u64 lba;
	u32 dnum;
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];	/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* data-out carries compare data then write data: 2 * num blocks */
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup_free;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);

	/* lock order: data lock before metadata lock */
	sdeb_data_write_lock(sip);
	sdeb_meta_write_lock(sip);
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		/* compare half mismatched: report MISCOMPARE, write nothing */
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup_unlock;
	}

	/* Cover sip->map_storep (which map_region()) sets with data lock */
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup_unlock:
	sdeb_meta_write_unlock(sip);
	sdeb_data_write_unlock(sip);
cleanup_free:
	kfree(arr);
	return retval;
}
5480
/* On-the-wire layout of one UNMAP block descriptor (all fields big-endian) */
struct unmap_block_desc {
	__be64 lba;		/* starting LBA of the range to unmap */
	__be32 blocks;		/* number of logical blocks in the range */
	__be32 __reserved;
};
5486
/*
 * Respond to UNMAP: copy the parameter list from the data-out buffer and
 * deallocate each described LBA range under the metadata lock.
 * Returns 0, or a check condition result with sense data set.
 */
static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	unsigned int i, payload_len, descriptors;
	int ret;

	if (!scsi_debug_lbp())
		return 0;	/* fib and say its done */
	payload_len = get_unaligned_be16(scp->cmnd + 7);
	/*
	 * NOTE(review): BUG_ON here crashes on a malformed transfer length;
	 * acceptable for a debug/test driver but would be a check condition
	 * in production code.
	 */
	BUG_ON(scsi_bufflen(scp) != payload_len);

	/* 8-byte parameter list header, then 16-byte descriptors */
	descriptors = (payload_len - 8) / 16;
	if (descriptors > sdebug_unmap_max_desc) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}

	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	/* sanity-check the parameter list header's own length fields */
	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	sdeb_meta_write_lock(sip);

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto out;

		unmap_region(sip, lba, num);
	}

	ret = 0;

out:
	sdeb_meta_write_unlock(sip);
	kfree(buf);

	return ret;
}
5541
5542#define SDEBUG_GET_LBA_STATUS_LEN 32
5543
5544static int resp_get_lba_status(struct scsi_cmnd *scp,
5545 struct sdebug_dev_info *devip)
5546{
5547 u8 *cmd = scp->cmnd;
5548 u64 lba;
5549 u32 alloc_len, mapped, num;
5550 int ret;
5551 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
5552
5553 lba = get_unaligned_be64(cmd + 2);
5554 alloc_len = get_unaligned_be32(cmd + 10);
5555
5556 if (alloc_len < 24)
5557 return 0;
5558
5559 ret = check_device_access_params(scp, lba, 1, false);
5560 if (ret)
5561 return ret;
5562
5563 if (scsi_debug_lbp()) {
5564 struct sdeb_store_info *sip = devip2sip(devip, true);
5565
5566 mapped = map_state(sip, lba, &num);
5567 } else {
5568 mapped = 1;
5569 /* following just in case virtual_gb changed */
5570 sdebug_capacity = get_sdebug_capacity();
5571 if (sdebug_capacity - lba <= 0xffffffff)
5572 num = sdebug_capacity - lba;
5573 else
5574 num = 0xffffffff;
5575 }
5576
5577 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
5578 put_unaligned_be32(20, arr); /* Parameter Data Length */
5579 put_unaligned_be64(lba, arr + 8); /* LBA */
5580 put_unaligned_be32(num, arr + 16); /* Number of blocks */
5581 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
5582
5583 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
5584}
5585
/*
 * Respond to GET STREAM STATUS: report one 8-byte descriptor per stream
 * starting at the requested stream id, bounded by both the allocation
 * length and the local buffer.
 */
static int resp_get_stream_status(struct scsi_cmnd *scp,
				  struct sdebug_dev_info *devip)
{
	u16 starting_stream_id, stream_id;
	const u8 *cmd = scp->cmnd;
	u32 alloc_len, offset;
	u8 arr[256] = {};
	struct scsi_stream_status_header *h = (void *)arr;

	starting_stream_id = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 10);

	if (alloc_len < 8) {
		/* not even room for the parameter data header */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
		return check_condition_result;
	}

	if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
		return check_condition_result;
	}

	/*
	 * The GET STREAM STATUS command only reports status information
	 * about open streams. Treat the non-permanent stream as open.
	 */
	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
			   &h->number_of_open_streams);

	/* 8-byte header, then one 8-byte descriptor per stream */
	for (offset = 8, stream_id = starting_stream_id;
	     offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
		stream_id < MAXIMUM_NUMBER_OF_STREAMS;
	     offset += 8, stream_id++) {
		struct scsi_stream_status *stream_status = (void *)arr + offset;

		stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
		put_unaligned_be16(stream_id,
				   &stream_status->stream_identifier);
		stream_status->rel_lifetime = stream_id + 1;
	}
	put_unaligned_be32(offset - 8, &h->len);	/* PARAMETER DATA LENGTH */

	return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
}
5630
5631static int resp_sync_cache(struct scsi_cmnd *scp,
5632 struct sdebug_dev_info *devip)
5633{
5634 int res = 0;
5635 u64 lba;
5636 u32 num_blocks;
5637 u8 *cmd = scp->cmnd;
5638
5639 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
5640 lba = get_unaligned_be32(cmd + 2);
5641 num_blocks = get_unaligned_be16(cmd + 7);
5642 } else { /* SYNCHRONIZE_CACHE(16) */
5643 lba = get_unaligned_be64(cmd + 2);
5644 num_blocks = get_unaligned_be32(cmd + 10);
5645 }
5646 if (lba + num_blocks > sdebug_capacity) {
5647 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5648 return check_condition_result;
5649 }
5650 if (!write_since_sync || (cmd[1] & 0x2))
5651 res = SDEG_RES_IMMED_MASK;
5652 else /* delay if write_since_sync and IMMED clear */
5653 write_since_sync = false;
5654 return res;
5655}
5656
5657/*
5658 * Assuming the LBA+num_blocks is not out-of-range, this function will return
5659 * CONDITION MET if the specified blocks will/have fitted in the cache, and
5660 * a GOOD status otherwise. Model a disk with a big cache and yield
5661 * CONDITION MET. Actually tries to bring range in main memory into the
5662 * cache associated with the CPU(s).
5663 *
5664 * The pcode 0x34 is also used for READ POSITION by tape devices.
5665 */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;	/* rest: blocks that wrap past store end */
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *fsp = sip->storep;

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!fsp)
		goto fini;	/* no backing store; still CONDITION MET */
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	block = do_div(lba, sdebug_store_sectors);
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the PRE-FETCH range into CPU's cache */
	sdeb_data_read_lock(sip);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	if (rest)	/* wrapped portion starts at beginning of store */
		prefetch_range(fsp, rest * sdebug_sector_size);

	sdeb_data_read_unlock(sip);
fini:
	if (cmd[1] & 0x2)	/* IMMED bit: complete without delay */
		res = SDEG_RES_IMMED_MASK;
	return res | condition_met_result;
}
5708
5709#define RL_BUCKET_ELEMS 8
5710
5711/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
5712 * (W-LUN), the normal Linux scanning logic does not associate it with a
5713 * device (e.g. /dev/sg7). The following magic will make that association:
5714 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
5715 * where <n> is a host number. If there are multiple targets in a host then
5716 * the above will associate a W-LUN to each target. To only get a W-LUN
5717 * for target 2, then use "echo '- 2 49409' > scan" .
5718 */
/*
 * Respond to REPORT LUNS: build the LUN list in fixed-size buckets of
 * RL_BUCKET_ELEMS entries (the 8-byte response header occupies the first
 * slot of the first bucket) and stream each bucket into the data-in buffer.
 * Returns 0, or a check condition / fill error result.
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	/* SELECT REPORT decides which LUN classes are listed */
	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket: slot 0 holds the response header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
				lun_p->scsi_lun[0] |= 0x40;
		}
		if (j < RL_BUCKET_ELEMS)
			break;	/* partial bucket: normal LUNs exhausted */
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		/* append the REPORT LUNS well-known LUN to the last bucket */
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
5811
/*
 * Respond to VERIFY(10/16) with BYTCHK 1 or 3: fetch the data-out buffer
 * (one block for BYTCHK=3, replicated locally) and compare it against the
 * store without modifying it. BYTCHK=0 always reports success; BYTCHK=2
 * is rejected as reserved.
 */
static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:	/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if (vnum == 0)
		return 0;	/* not an error */
	/* BYTCHK=3 transfers only a single block from the host */
	a_num = is_bytchk3 ? 1 : vnum;
	/* Treat following check like one for read (i.e. no write) access */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	/* buffer is sized for the full vnum so BYTCHK=3 can replicate */
	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	sdeb_data_read_lock(sip);

	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, __func__, a_num * lb_size, ret);
	}
	if (is_bytchk3) {
		/* replicate the single fetched block across the range */
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	/* read_only=true: compare without writing back */
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	sdeb_data_read_unlock(sip);
	kfree(arr);
	return ret;
}
5887
5888#define RZONES_DESC_HD 64
5889
5890/* Report zones depending on start LBA and reporting options */
5891static int resp_report_zones(struct scsi_cmnd *scp,
5892 struct sdebug_dev_info *devip)
5893{
5894 unsigned int rep_max_zones, nrz = 0;
5895 int ret = 0;
5896 u32 alloc_len, rep_opts, rep_len;
5897 bool partial;
5898 u64 lba, zs_lba;
5899 u8 *arr = NULL, *desc;
5900 u8 *cmd = scp->cmnd;
5901 struct sdeb_zone_state *zsp = NULL;
5902 struct sdeb_store_info *sip = devip2sip(devip, false);
5903
5904 if (!sdebug_dev_is_zoned(devip)) {
5905 mk_sense_invalid_opcode(scp);
5906 return check_condition_result;
5907 }
5908 zs_lba = get_unaligned_be64(cmd + 2);
5909 alloc_len = get_unaligned_be32(cmd + 10);
5910 if (alloc_len == 0)
5911 return 0; /* not an error */
5912 rep_opts = cmd[14] & 0x3f;
5913 partial = cmd[14] & 0x80;
5914
5915 if (zs_lba >= sdebug_capacity) {
5916 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5917 return check_condition_result;
5918 }
5919
5920 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
5921
5922 arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
5923 if (!arr) {
5924 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5925 INSUFF_RES_ASCQ);
5926 return check_condition_result;
5927 }
5928
5929 sdeb_meta_read_lock(sip);
5930
5931 desc = arr + 64;
5932 for (lba = zs_lba; lba < sdebug_capacity;
5933 lba = zsp->z_start + zsp->z_size) {
5934 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
5935 break;
5936 zsp = zbc_zone(devip, lba);
5937 switch (rep_opts) {
5938 case 0x00:
5939 /* All zones */
5940 break;
5941 case 0x01:
5942 /* Empty zones */
5943 if (zsp->z_cond != ZC1_EMPTY)
5944 continue;
5945 break;
5946 case 0x02:
5947 /* Implicit open zones */
5948 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
5949 continue;
5950 break;
5951 case 0x03:
5952 /* Explicit open zones */
5953 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
5954 continue;
5955 break;
5956 case 0x04:
5957 /* Closed zones */
5958 if (zsp->z_cond != ZC4_CLOSED)
5959 continue;
5960 break;
5961 case 0x05:
5962 /* Full zones */
5963 if (zsp->z_cond != ZC5_FULL)
5964 continue;
5965 break;
5966 case 0x06:
5967 case 0x07:
5968 case 0x10:
5969 /*
5970 * Read-only, offline, reset WP recommended are
5971 * not emulated: no zones to report;
5972 */
5973 continue;
5974 case 0x11:
5975 /* non-seq-resource set */
5976 if (!zsp->z_non_seq_resource)
5977 continue;
5978 break;
5979 case 0x3e:
5980 /* All zones except gap zones. */
5981 if (zbc_zone_is_gap(zsp))
5982 continue;
5983 break;
5984 case 0x3f:
5985 /* Not write pointer (conventional) zones */
5986 if (zbc_zone_is_seq(zsp))
5987 continue;
5988 break;
5989 default:
5990 mk_sense_buffer(scp, ILLEGAL_REQUEST,
5991 INVALID_FIELD_IN_CDB, 0);
5992 ret = check_condition_result;
5993 goto fini;
5994 }
5995
5996 if (nrz < rep_max_zones) {
5997 /* Fill zone descriptor */
5998 desc[0] = zsp->z_type;
5999 desc[1] = zsp->z_cond << 4;
6000 if (zsp->z_non_seq_resource)
6001 desc[1] |= 1 << 1;
6002 put_unaligned_be64((u64)zsp->z_size, desc + 8);
6003 put_unaligned_be64((u64)zsp->z_start, desc + 16);
6004 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
6005 desc += 64;
6006 }
6007
6008 if (partial && nrz >= rep_max_zones)
6009 break;
6010
6011 nrz++;
6012 }
6013
6014 /* Report header */
6015 /* Zone list length. */
6016 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
6017 /* Maximum LBA */
6018 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
6019 /* Zone starting LBA granularity. */
6020 if (devip->zcap < devip->zsize)
6021 put_unaligned_be64(devip->zsize, arr + 16);
6022
6023 rep_len = (unsigned long)desc - (unsigned long)arr;
6024 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
6025
6026fini:
6027 sdeb_meta_read_unlock(sip);
6028 kfree(arr);
6029 return ret;
6030}
6031
/*
 * Respond to WRITE ATOMIC(16): validate the LBA alignment, transfer
 * length granularity and atomic boundary against the module-parameter
 * limits, then perform the write. NOTE(review): the final "true"
 * argument to do_device_access() presumably requests an atomic store
 * update - confirm against its definition.
 */
static int resp_atomic_write(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	struct sdeb_store_info *sip;
	u8 *cmd = scp->cmnd;
	u16 boundary, len;
	u64 lba, lba_tmp;
	int ret;

	if (!scsi_debug_atomic_write()) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sip = devip2sip(devip, true);

	lba = get_unaligned_be64(cmd + 2);
	boundary = get_unaligned_be16(cmd + 10);
	len = get_unaligned_be16(cmd + 12);

	/* do_div() modifies its dividend in place, so work on a copy */
	lba_tmp = lba;
	if (sdebug_atomic_wr_align &&
	    do_div(lba_tmp, sdebug_atomic_wr_align)) {
		/* Does not meet alignment requirement */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	if (sdebug_atomic_wr_gran && len % sdebug_atomic_wr_gran) {
		/* Does not meet alignment requirement */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* With a boundary the per-boundary length limit applies instead */
	if (boundary > 0) {
		if (boundary > sdebug_atomic_wr_max_bndry) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
			return check_condition_result;
		}

		if (len > sdebug_atomic_wr_max_length_bndry) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
			return check_condition_result;
		}
	} else {
		if (len > sdebug_atomic_wr_max_length) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
			return check_condition_result;
		}
	}

	/* A short or failed transfer is reported as a host-side error */
	ret = do_device_access(sip, scp, 0, lba, len, 0, true, true);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;
	if (unlikely(ret != len * sdebug_sector_size))
		return DID_ERROR << 16;
	return 0;
}
6090
6091/* Logic transplanted from tcmu-runner, file_zbc.c */
6092static void zbc_open_all(struct sdebug_dev_info *devip)
6093{
6094 struct sdeb_zone_state *zsp = &devip->zstate[0];
6095 unsigned int i;
6096
6097 for (i = 0; i < devip->nr_zones; i++, zsp++) {
6098 if (zsp->z_cond == ZC4_CLOSED)
6099 zbc_open_zone(devip, &devip->zstate[i], true);
6100 }
6101}
6102
/*
 * Respond to OPEN ZONE: either explicitly open all closed zones (ALL bit
 * set in CDB byte 14) or the single zone whose start LBA is given in the
 * CDB. Zone metadata is protected by the store's meta write lock.
 */
static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	sdeb_meta_write_lock(sip);

	if (all) {
		/* Check if all closed zones can be open */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	/* The zone ID must be exactly a zone start LBA */
	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	/* Already explicitly open or full: nothing to do, success */
	zc = zsp->z_cond;
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;

	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	zbc_open_zone(devip, zsp, true);
fini:
	sdeb_meta_write_unlock(sip);
	return res;
}
6169
6170static void zbc_close_all(struct sdebug_dev_info *devip)
6171{
6172 unsigned int i;
6173
6174 for (i = 0; i < devip->nr_zones; i++)
6175 zbc_close_zone(devip, &devip->zstate[i]);
6176}
6177
/*
 * Respond to CLOSE ZONE: close all zones (ALL bit in CDB byte 14) or the
 * single zone whose start LBA is specified in the CDB. Metadata changes
 * happen under the store's meta write lock.
 */
static int resp_close_zone(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_meta_write_lock(sip);

	if (all) {
		zbc_close_all(devip);
		goto fini;
	}

	/* Close specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	/* The zone ID must be exactly a zone start LBA */
	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* Conventional zones have no write pointer and cannot be closed */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_close_zone(devip, zsp);
fini:
	sdeb_meta_write_unlock(sip);
	return res;
}
6225
/*
 * Transition a zone to the FULL condition, advancing its write pointer
 * to the end of the zone. Open zones are closed first so the open/closed
 * accounting stays consistent. An EMPTY zone is finished only when
 * @empty is true (i.e. for an explicit FINISH ZONE, not FINISH ALL).
 */
static void zbc_finish_zone(struct sdebug_dev_info *devip,
			    struct sdeb_zone_state *zsp, bool empty)
{
	enum sdebug_z_cond zc = zsp->z_cond;

	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
		/* Close first so nr_exp_open/nr_closed are updated properly */
		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
			zbc_close_zone(devip, zsp);
		if (zsp->z_cond == ZC4_CLOSED)
			devip->nr_closed--;
		zsp->z_wp = zsp->z_start + zsp->z_size;
		zsp->z_cond = ZC5_FULL;
	}
}
6241
6242static void zbc_finish_all(struct sdebug_dev_info *devip)
6243{
6244 unsigned int i;
6245
6246 for (i = 0; i < devip->nr_zones; i++)
6247 zbc_finish_zone(devip, &devip->zstate[i], false);
6248}
6249
/*
 * Respond to FINISH ZONE: make all zones full (ALL bit in CDB byte 14)
 * or finish the single zone whose start LBA is given in the CDB.
 * Performed under the store's meta write lock.
 */
static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_meta_write_lock(sip);

	if (all) {
		zbc_finish_all(devip);
		goto fini;
	}

	/* Finish the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	/* The zone ID must be exactly a zone start LBA */
	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* Conventional zones cannot be finished */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	/* An explicitly named empty zone may also be finished (empty=true) */
	zbc_finish_zone(devip, zsp, true);
fini:
	sdeb_meta_write_unlock(sip);
	return res;
}
6297
/*
 * Reset the write pointer of a sequential zone back to its start,
 * returning the zone to the EMPTY condition and zeroing whatever data
 * had been written into it in the backing store. Conventional and gap
 * zones (no write pointer) are left untouched.
 */
static void zbc_rwp_zone(struct sdebug_dev_info *devip,
			 struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!zbc_zone_is_seq(zsp))
		return;

	/* Close first so open/closed accounting is maintained */
	zc = zsp->z_cond;
	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
		zbc_close_zone(devip, zsp);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;

	/* Wipe the written portion of the zone from the store */
	if (zsp->z_wp > zsp->z_start)
		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);

	zsp->z_non_seq_resource = false;
	zsp->z_wp = zsp->z_start;
	zsp->z_cond = ZC1_EMPTY;
}
6322
6323static void zbc_rwp_all(struct sdebug_dev_info *devip)
6324{
6325 unsigned int i;
6326
6327 for (i = 0; i < devip->nr_zones; i++)
6328 zbc_rwp_zone(devip, &devip->zstate[i]);
6329}
6330
/*
 * Respond to RESET WRITE POINTER: reset all zones (ALL bit in CDB byte
 * 14) or the single zone whose start LBA is given in the CDB. Performed
 * under the store's meta write lock.
 */
static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_meta_write_lock(sip);

	if (all) {
		zbc_rwp_all(devip);
		goto fini;
	}

	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	/* The zone ID must be exactly a zone start LBA */
	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* Conventional zones have no write pointer to reset */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_rwp_zone(devip, zsp);
fini:
	sdeb_meta_write_unlock(sip);
	return res;
}
6376
6377static u32 get_tag(struct scsi_cmnd *cmnd)
6378{
6379 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
6380}
6381
/*
 * Queued (deferred) command completions converge here, whether the delay
 * was implemented with an hrtimer or a workqueue. Hands the command back
 * to the SCSI midlayer via scsi_done(), unless it was marked aborted, in
 * which case the request is pushed into error handling instead.
 */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	struct sdebug_scsi_cmd *sdsc = container_of(sd_dp,
					typeof(*sdsc), sd_dp);
	/*
	 * The sdebug_scsi_cmd private data sits directly after the
	 * scsi_cmnd, so stepping back one scsi_cmnd recovers the owning
	 * command. NOTE(review): relies on the scsi_cmd_priv() layout.
	 */
	struct scsi_cmnd *scp = (struct scsi_cmnd *)sdsc - 1;
	unsigned long flags;
	bool aborted;

	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* count completions that migrated off the issuing CPU */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}

	if (!scp) {
		pr_err("scmd=NULL\n");
		return;
	}

	/* Read and clear the aborted flag under the per-command lock */
	spin_lock_irqsave(&sdsc->lock, flags);
	aborted = sd_dp->aborted;
	if (unlikely(aborted))
		sd_dp->aborted = false;

	spin_unlock_irqrestore(&sdsc->lock, flags);

	if (aborted) {
		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
		blk_abort_request(scsi_cmd_to_rq(scp));
		return;
	}

	scsi_done(scp); /* callback to mid level */
}
6417
6418/* When high resolution timer goes off this function is called. */
6419static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
6420{
6421 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
6422 hrt);
6423 sdebug_q_cmd_complete(sd_dp);
6424 return HRTIMER_NORESTART;
6425}
6426
6427/* When work queue schedules work, it calls this function. */
6428static void sdebug_q_cmd_wq_complete(struct work_struct *work)
6429{
6430 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
6431 ew.work);
6432 sdebug_q_cmd_complete(sd_dp);
6433}
6434
6435static bool got_shared_uuid;
6436static uuid_t shared_uuid;
6437
6438static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
6439{
6440 struct sdeb_zone_state *zsp;
6441 sector_t capacity = get_sdebug_capacity();
6442 sector_t conv_capacity;
6443 sector_t zstart = 0;
6444 unsigned int i;
6445
6446 /*
6447 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
6448 * a zone size allowing for at least 4 zones on the device. Otherwise,
6449 * use the specified zone size checking that at least 2 zones can be
6450 * created for the device.
6451 */
6452 if (!sdeb_zbc_zone_size_mb) {
6453 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
6454 >> ilog2(sdebug_sector_size);
6455 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
6456 devip->zsize >>= 1;
6457 if (devip->zsize < 2) {
6458 pr_err("Device capacity too small\n");
6459 return -EINVAL;
6460 }
6461 } else {
6462 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
6463 pr_err("Zone size is not a power of 2\n");
6464 return -EINVAL;
6465 }
6466 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
6467 >> ilog2(sdebug_sector_size);
6468 if (devip->zsize >= capacity) {
6469 pr_err("Zone size too large for device capacity\n");
6470 return -EINVAL;
6471 }
6472 }
6473
6474 devip->zsize_shift = ilog2(devip->zsize);
6475 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
6476
6477 if (sdeb_zbc_zone_cap_mb == 0) {
6478 devip->zcap = devip->zsize;
6479 } else {
6480 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
6481 ilog2(sdebug_sector_size);
6482 if (devip->zcap > devip->zsize) {
6483 pr_err("Zone capacity too large\n");
6484 return -EINVAL;
6485 }
6486 }
6487
6488 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
6489 if (conv_capacity >= capacity) {
6490 pr_err("Number of conventional zones too large\n");
6491 return -EINVAL;
6492 }
6493 devip->nr_conv_zones = sdeb_zbc_nr_conv;
6494 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
6495 devip->zsize_shift;
6496 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
6497
6498 /* Add gap zones if zone capacity is smaller than the zone size */
6499 if (devip->zcap < devip->zsize)
6500 devip->nr_zones += devip->nr_seq_zones;
6501
6502 if (devip->zoned) {
6503 /* zbc_max_open_zones can be 0, meaning "not reported" */
6504 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
6505 devip->max_open = (devip->nr_zones - 1) / 2;
6506 else
6507 devip->max_open = sdeb_zbc_max_open;
6508 }
6509
6510 devip->zstate = kcalloc(devip->nr_zones,
6511 sizeof(struct sdeb_zone_state), GFP_KERNEL);
6512 if (!devip->zstate)
6513 return -ENOMEM;
6514
6515 for (i = 0; i < devip->nr_zones; i++) {
6516 zsp = &devip->zstate[i];
6517
6518 zsp->z_start = zstart;
6519
6520 if (i < devip->nr_conv_zones) {
6521 zsp->z_type = ZBC_ZTYPE_CNV;
6522 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
6523 zsp->z_wp = (sector_t)-1;
6524 zsp->z_size =
6525 min_t(u64, devip->zsize, capacity - zstart);
6526 } else if ((zstart & (devip->zsize - 1)) == 0) {
6527 if (devip->zoned)
6528 zsp->z_type = ZBC_ZTYPE_SWR;
6529 else
6530 zsp->z_type = ZBC_ZTYPE_SWP;
6531 zsp->z_cond = ZC1_EMPTY;
6532 zsp->z_wp = zsp->z_start;
6533 zsp->z_size =
6534 min_t(u64, devip->zcap, capacity - zstart);
6535 } else {
6536 zsp->z_type = ZBC_ZTYPE_GAP;
6537 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
6538 zsp->z_wp = (sector_t)-1;
6539 zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
6540 capacity - zstart);
6541 }
6542
6543 WARN_ON_ONCE((int)zsp->z_size <= 0);
6544 zstart += zsp->z_size;
6545 }
6546
6547 return 0;
6548}
6549
/*
 * Allocate and initialize one emulated device, linking it into its
 * host's dev_info_list. The uuid_ctl module parameter selects between a
 * per-LU random designator (1) and a designator shared by all LUs (2).
 * Returns NULL on allocation or zone-setup failure.
 */
static struct sdebug_dev_info *sdebug_device_create(
			struct sdebug_host_info *sdbg_host, gfp_t flags)
{
	struct sdebug_dev_info *devip;

	devip = kzalloc(sizeof(*devip), flags);
	if (devip) {
		if (sdebug_uuid_ctl == 1)
			uuid_gen(&devip->lu_name);
		else if (sdebug_uuid_ctl == 2) {
			/* Generate the shared UUID on first use only */
			if (got_shared_uuid)
				devip->lu_name = shared_uuid;
			else {
				uuid_gen(&shared_uuid);
				got_shared_uuid = true;
				devip->lu_name = shared_uuid;
			}
		}
		devip->sdbg_host = sdbg_host;
		if (sdeb_zbc_in_use) {
			devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
			if (sdebug_device_create_zones(devip)) {
				kfree(devip);
				return NULL;
			}
		} else {
			devip->zoned = false;
		}
		if (sdebug_ptype == TYPE_TAPE) {
			devip->tape_density = TAPE_DEF_DENSITY;
			devip->tape_blksize = TAPE_DEF_BLKSIZE;
		}
		devip->create_ts = ktime_get_boottime();
		/* stopped==2: not ready until tur_ms_to_ready elapses */
		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
		spin_lock_init(&devip->list_lock);
		INIT_LIST_HEAD(&devip->inject_err_list);
		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
	}
	return devip;
}
6590
6591static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
6592{
6593 struct sdebug_host_info *sdbg_host;
6594 struct sdebug_dev_info *open_devip = NULL;
6595 struct sdebug_dev_info *devip;
6596
6597 sdbg_host = shost_to_sdebug_host(sdev->host);
6598
6599 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6600 if ((devip->used) && (devip->channel == sdev->channel) &&
6601 (devip->target == sdev->id) &&
6602 (devip->lun == sdev->lun))
6603 return devip;
6604 else {
6605 if ((!devip->used) && (!open_devip))
6606 open_devip = devip;
6607 }
6608 }
6609 if (!open_devip) { /* try and make a new one */
6610 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
6611 if (!open_devip) {
6612 pr_err("out of memory at line %d\n", __LINE__);
6613 return NULL;
6614 }
6615 }
6616
6617 open_devip->channel = sdev->channel;
6618 open_devip->target = sdev->id;
6619 open_devip->lun = sdev->lun;
6620 open_devip->sdbg_host = sdbg_host;
6621 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
6622 open_devip->used = true;
6623 return open_devip;
6624}
6625
6626static int scsi_debug_sdev_init(struct scsi_device *sdp)
6627{
6628 if (sdebug_verbose)
6629 pr_info("sdev_init <%u %u %u %llu>\n",
6630 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
6631
6632 return 0;
6633}
6634
/*
 * Midlayer sdev_configure hook: bind (or build) the per-device info,
 * set up tape emulation storage when applicable, and create the per-
 * device debugfs entries. Returns 1 (mark device offline) on resource
 * failure, 0 on success.
 */
static int scsi_debug_sdev_configure(struct scsi_device *sdp,
				     struct queue_limits *lim)
{
	struct sdebug_dev_info *devip =
			(struct sdebug_dev_info *)sdp->hostdata;
	struct dentry *dentry;

	if (sdebug_verbose)
		pr_info("sdev_configure <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
	if (devip == NULL) {
		devip = find_build_dev_info(sdp);
		if (devip == NULL)
			return 1;  /* no resources, will be marked offline */
	}
	if (sdebug_ptype == TYPE_TAPE) {
		/* Lazily allocate partition 0 of the emulated tape */
		if (!devip->tape_blocks[0]) {
			devip->tape_blocks[0] =
				kcalloc(TAPE_UNITS, sizeof(struct tape_block),
					GFP_KERNEL);
			if (!devip->tape_blocks[0])
				return 1;
		}
		devip->tape_pending_nbr_partitions = -1;
		if (partition_tape(devip, 1, TAPE_UNITS, 0) < 0) {
			kfree(devip->tape_blocks[0]);
			devip->tape_blocks[0] = NULL;
			return 1;
		}
	}
	sdp->hostdata = devip;
	if (sdebug_no_uld)
		sdp->no_uld_attach = 1;
	config_cdb_len(sdp);

	if (sdebug_allow_restart)
		sdp->allow_restart = 1;

	/*
	 * debugfs failures below are non-fatal; the device works without
	 * its debugfs entries. NOTE(review): the directory is created
	 * under the sdev_dev name but the messages print sdev_gendev -
	 * confirm whether that mismatch is intentional.
	 */
	devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
				sdebug_debugfs_root);
	if (IS_ERR_OR_NULL(devip->debugfs_entry))
		pr_info("%s: failed to create debugfs directory for device %s\n",
			__func__, dev_name(&sdp->sdev_gendev));

	dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
				&sdebug_error_fops);
	if (IS_ERR_OR_NULL(dentry))
		pr_info("%s: failed to create error file for device %s\n",
			__func__, dev_name(&sdp->sdev_gendev));

	return 0;
}
6689
/*
 * Midlayer sdev_destroy hook: tear down injected-error entries (RCU
 * deferred free), debugfs entries and tape storage, then release the
 * devip slot for reuse by a future device at the same address.
 */
static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
		(struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;

	if (sdebug_verbose)
		pr_info("sdev_destroy <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);

	if (!devip)
		return;

	/* Unlink injected errors; actual frees happen after a grace period */
	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		list_del_rcu(&err->list);
		call_rcu(&err->rcu, sdebug_err_free);
	}
	spin_unlock(&devip->list_lock);

	debugfs_remove(devip->debugfs_entry);

	if (sdp->type == TYPE_TAPE) {
		kfree(devip->tape_blocks[0]);
		devip->tape_blocks[0] = NULL;
	}

	/* make this slot available for re-use */
	devip->used = false;
	sdp->hostdata = NULL;
}
6721
/*
 * Try to stop a deferred command's pending timer or work item.
 * Returns true if the deferral was cancelled (or had already run to the
 * point where nothing needs cancelling); returns false when the
 * completion callback may be executing concurrently. Caller must hold
 * the per-command lock.
 */
static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
{
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	struct sdebug_defer *sd_dp = &sdsc->sd_dp;
	enum sdeb_defer_type defer_t = READ_ONCE(sd_dp->defer_t);

	lockdep_assert_held(&sdsc->lock);

	if (defer_t == SDEB_DEFER_HRT) {
		int res = hrtimer_try_to_cancel(&sd_dp->hrt);

		switch (res) {
		case -1: /* -1 It's executing the CB */
			return false;
		case 0: /* Not active, it must have already run */
		case 1: /* Was active, we've now cancelled */
		default:
			return true;
		}
	} else if (defer_t == SDEB_DEFER_WQ) {
		/* Cancel if pending */
		if (cancel_work(&sd_dp->ew.work))
			return true;
		/* callback may be running, so return false */
		return false;
	} else if (defer_t == SDEB_DEFER_POLL) {
		/* Polled completions have no callback to race with */
		return true;
	}

	return false;
}
6754
6755/*
6756 * Called from scsi_debug_abort() only, which is for timed-out cmd.
6757 */
6758static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
6759{
6760 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6761 unsigned long flags;
6762 bool res;
6763
6764 spin_lock_irqsave(&sdsc->lock, flags);
6765 res = scsi_debug_stop_cmnd(cmnd);
6766 spin_unlock_irqrestore(&sdsc->lock, flags);
6767
6768 return res;
6769}
6770
6771/*
6772 * All we can do is set the cmnd as internally aborted and wait for it to
6773 * finish. We cannot call scsi_done() as normal completion path may do that.
6774 */
6775static bool sdebug_stop_cmnd(struct request *rq, void *data)
6776{
6777 scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
6778
6779 return true;
6780}
6781
6782/* Deletes (stops) timers or work queues of all queued commands */
6783static void stop_all_queued(void)
6784{
6785 struct sdebug_host_info *sdhp;
6786
6787 mutex_lock(&sdebug_host_list_mutex);
6788 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6789 struct Scsi_Host *shost = sdhp->shost;
6790
6791 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
6792 }
6793 mutex_unlock(&sdebug_host_list_mutex);
6794}
6795
/*
 * Check whether an injected ERR_ABORT_CMD_FAILED entry applies to this
 * command (matching opcode, or 0xff wildcard). Returns non-zero while
 * the entry's cnt is non-zero; a negative cnt counts up towards zero so
 * the failure is injected a finite number of times.
 */
static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_ABORT_CMD_FAILED &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			ret = !!err->cnt;
			if (err->cnt < 0)
				err->cnt++;

			rcu_read_unlock();
			return ret;
		}
	}
	rcu_read_unlock();

	return 0;
}
6823
6824static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
6825{
6826 bool aborted = scsi_debug_abort_cmnd(SCpnt);
6827 u8 *cmd = SCpnt->cmnd;
6828 u8 opcode = cmd[0];
6829
6830 ++num_aborts;
6831
6832 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6833 sdev_printk(KERN_INFO, SCpnt->device,
6834 "%s: command%s found\n", __func__,
6835 aborted ? "" : " not");
6836
6837
6838 if (sdebug_fail_abort(SCpnt)) {
6839 scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
6840 opcode);
6841 return FAILED;
6842 }
6843
6844 if (aborted == false)
6845 return FAILED;
6846
6847 return SUCCESS;
6848}
6849
6850static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
6851{
6852 struct scsi_device *sdp = data;
6853 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
6854
6855 if (scmd->device == sdp)
6856 scsi_debug_abort_cmnd(scmd);
6857
6858 return true;
6859}
6860
6861/* Deletes (stops) timers or work queues of all queued commands per sdev */
6862static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
6863{
6864 struct Scsi_Host *shost = sdp->host;
6865
6866 blk_mq_tagset_busy_iter(&shost->tag_set,
6867 scsi_debug_stop_all_queued_iter, sdp);
6868}
6869
/*
 * Check whether an injected ERR_LUN_RESET_FAILED entry applies to this
 * command (matching opcode, or 0xff wildcard). Same counter semantics
 * as sdebug_fail_abort(): non-zero cnt injects the failure, a negative
 * cnt counts up towards zero for a limited number of injections.
 */
static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_LUN_RESET_FAILED &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			ret = !!err->cnt;
			if (err->cnt < 0)
				err->cnt++;

			rcu_read_unlock();
			return ret;
		}
	}
	rcu_read_unlock();

	return 0;
}
6897
6898static void scsi_tape_reset_clear(struct sdebug_dev_info *devip)
6899{
6900 int i;
6901
6902 devip->tape_blksize = TAPE_DEF_BLKSIZE;
6903 devip->tape_density = TAPE_DEF_DENSITY;
6904 devip->tape_partition = 0;
6905 devip->tape_dce = 0;
6906 for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
6907 devip->tape_location[i] = 0;
6908 devip->tape_pending_nbr_partitions = -1;
6909 /* Don't reset partitioning? */
6910}
6911
/*
 * Error-handler LUN reset hook: stop all queued commands on the device,
 * raise a power-on/reset unit attention and clear tape state. Honours
 * an injected "LUN reset fails" error.
 */
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
{
	struct scsi_device *sdp = SCpnt->device;
	struct sdebug_dev_info *devip = sdp->hostdata;
	u8 *cmd = SCpnt->cmnd;
	u8 opcode = cmd[0];

	++num_dev_resets;

	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);

	scsi_debug_stop_all_queued(sdp);
	if (devip) {
		set_bit(SDEBUG_UA_POR, devip->uas_bm);
		if (SCpnt->device->type == TYPE_TAPE)
			scsi_tape_reset_clear(devip);
	}

	if (sdebug_fail_lun_reset(SCpnt)) {
		scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
		return FAILED;
	}

	return SUCCESS;
}
6938
6939static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
6940{
6941 struct scsi_target *starget = scsi_target(cmnd->device);
6942 struct sdebug_target_info *targetip =
6943 (struct sdebug_target_info *)starget->hostdata;
6944
6945 if (targetip)
6946 return targetip->reset_fail;
6947
6948 return 0;
6949}
6950
/*
 * Error-handler target reset hook: raise a bus-reset unit attention on
 * every device of the target and clear tape state. Honours an injected
 * "target reset fails" flag.
 */
static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
{
	struct scsi_device *sdp = SCpnt->device;
	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
	struct sdebug_dev_info *devip;
	u8 *cmd = SCpnt->cmnd;
	u8 opcode = cmd[0];
	int k = 0;	/* number of devices affected, for the trace below */

	++num_target_resets;
	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);

	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
		if (devip->target == sdp->id) {
			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
			if (SCpnt->device->type == TYPE_TAPE)
				scsi_tape_reset_clear(devip);
			++k;
		}
	}

	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp,
			    "%s: %d device(s) found in target\n", __func__, k);

	if (sdebug_fail_target_reset(SCpnt)) {
		scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
			    opcode);
		return FAILED;
	}

	return SUCCESS;
}
6985
6986static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
6987{
6988 struct scsi_device *sdp = SCpnt->device;
6989 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6990 struct sdebug_dev_info *devip;
6991 int k = 0;
6992
6993 ++num_bus_resets;
6994
6995 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6996 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6997
6998 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6999 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
7000 if (SCpnt->device->type == TYPE_TAPE)
7001 scsi_tape_reset_clear(devip);
7002 ++k;
7003 }
7004
7005 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
7006 sdev_printk(KERN_INFO, sdp,
7007 "%s: %d device(s) found in host\n", __func__, k);
7008 return SUCCESS;
7009}
7010
7011static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
7012{
7013 struct sdebug_host_info *sdbg_host;
7014 struct sdebug_dev_info *devip;
7015 int k = 0;
7016
7017 ++num_host_resets;
7018 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
7019 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
7020 mutex_lock(&sdebug_host_list_mutex);
7021 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
7022 list_for_each_entry(devip, &sdbg_host->dev_info_list,
7023 dev_list) {
7024 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
7025 if (SCpnt->device->type == TYPE_TAPE)
7026 scsi_tape_reset_clear(devip);
7027 ++k;
7028 }
7029 }
7030 mutex_unlock(&sdebug_host_list_mutex);
7031 stop_all_queued();
7032 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
7033 sdev_printk(KERN_INFO, SCpnt->device,
7034 "%s: %d device(s) found\n", __func__, k);
7035 return SUCCESS;
7036}
7037
/*
 * Write an MBR-style partition table into the first sector of the ram
 * store @ramp. Builds sdebug_num_parts equal-sized primary partitions of
 * type 0x83, each aligned to a cylinder boundary (sdebug_heads *
 * sdebug_sectors_per). Does nothing for stores smaller than 1 MiB or when
 * no partitions were requested.
 */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)get_sdebug_capacity();
	/* first track (sdebug_sectors_per) is reserved for the MBR itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	max_part_secs = sectors_per_part;
	/*
	 * Round each partition start down to a cylinder boundary and shrink
	 * max_part_secs to the smallest resulting gap so that all
	 * partitions get the same (cylinder-aligned) size.
	 */
	for (k = 1; k < sdebug_num_parts; ++k) {
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
		if (starts[k] - starts[k - 1] < max_part_secs)
			max_part_secs = starts[k] - starts[k - 1];
	}
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* 0 terminates the loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k] + max_part_secs - 1;
		pp->boot_ind = 0;

		/* legacy CHS geometry for the partition's first sector */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		/* ... and for its last sector */
		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
7090
7091static void block_unblock_all_queues(bool block)
7092{
7093 struct sdebug_host_info *sdhp;
7094
7095 lockdep_assert_held(&sdebug_host_list_mutex);
7096
7097 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7098 struct Scsi_Host *shost = sdhp->shost;
7099
7100 if (block)
7101 scsi_block_requests(shost);
7102 else
7103 scsi_unblock_requests(shost);
7104 }
7105}
7106
7107/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
7108 * commands will be processed normally before triggers occur.
7109 */
7110static void tweak_cmnd_count(void)
7111{
7112 int count, modulo;
7113
7114 modulo = abs(sdebug_every_nth);
7115 if (modulo < 2)
7116 return;
7117
7118 mutex_lock(&sdebug_host_list_mutex);
7119 block_unblock_all_queues(true);
7120 count = atomic_read(&sdebug_cmnd_count);
7121 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
7122 block_unblock_all_queues(false);
7123 mutex_unlock(&sdebug_host_list_mutex);
7124}
7125
/* Zero all driver-wide command/completion statistics counters. */
static void clear_queue_stats(void)
{
	atomic_set(&sdebug_cmnd_count, 0);
	atomic_set(&sdebug_completions, 0);
	atomic_set(&sdebug_miss_cpus, 0);
	atomic_set(&sdebug_a_tsf, 0);
}
7133
7134static bool inject_on_this_cmd(void)
7135{
7136 if (sdebug_every_nth == 0)
7137 return false;
7138 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
7139}
7140
#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */

/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	struct request *rq = scsi_cmd_to_rq(cmnd);
	bool polled = rq->cmd_flags & REQ_POLLED;
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	unsigned long flags;
	u64 ns_from_boot = 0;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	/* no per-device info: answer immediately (DID_NO_CONNECT by default) */
	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	/* delta_jiff == 0 requests an immediate, in-thread response */
	if (delta_jiff == 0)
		goto respond_in_thread;


	/*
	 * SDEBUG_OPT_RARE_TSF: when the device queue is full, inject a
	 * TASK SET FULL status once every abs(every_nth) queue-full events.
	 */
	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
		     (scsi_result == 0))) {
		int num_in_q = scsi_device_busy(sdp);
		int qdepth = cmnd->device->queue_depth;

		if ((num_in_q == qdepth) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			scsi_result = device_qfull_result;

			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
					    __func__, num_in_q);
		}
	}

	sd_dp = &sdsc->sd_dp;

	/* timestamp needed for poll completion and the tiny-ndelay shortcut */
	if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/* response function asked for an immediate completion */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		if (atomic_read(&sdeb_inject_pending)) {
			/* inject a one-shot transport (ACK/NAK timeout) error */
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			if (sdebug_random && ns < U32_MAX) {
				ns = get_random_u32_below((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = get_random_u32_below((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* call scsi_done() from this thread */
					scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			/* record completion time; poll path picks it up later */
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			/* schedule the invocation of scsi_done() for a later time */
			spin_lock_irqsave(&sdsc->lock, flags);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
			/*
			 * The completion handler will try to grab sqcp->lock,
			 * so there is no chance that the completion handler
			 * will call scsi_done() until we release the lock
			 * here (so ok to keep referencing sdsc).
			 */
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	} else {	/* jdelay < 0, use work queue */
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending))) {
			/* mark the command so the worker simulates an abort */
			sd_dp->aborted = true;
			atomic_set(&sdeb_inject_pending, 0);
			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
		}

		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			spin_lock_irqsave(&sdsc->lock, flags);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
			schedule_work(&sd_dp->ew.work);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	}

	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	scsi_done(cmnd);
	return 0;
}
7299
7300/* Note: The following macros create attribute files in the
7301 /sys/module/scsi_debug/parameters directory. Unfortunately this
7302 driver is unaware of a change and cannot trigger auxiliary actions
7303 as it can when the corresponding attribute in the
7304 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
7305 */
7306module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
7307module_param_named(ato, sdebug_ato, int, S_IRUGO);
7308module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
7309module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
7310module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
7311module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
7312module_param_named(dif, sdebug_dif, int, S_IRUGO);
7313module_param_named(dix, sdebug_dix, int, S_IRUGO);
7314module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
7315module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
7316module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
7317module_param_named(guard, sdebug_guard, uint, S_IRUGO);
7318module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
7319module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
7320module_param_string(inq_product, sdebug_inq_product_id,
7321 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
7322module_param_string(inq_rev, sdebug_inq_product_rev,
7323 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
7324module_param_string(inq_vendor, sdebug_inq_vendor_id,
7325 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
7326module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
7327module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
7328module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
7329module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
7330module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO);
7331module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
7332module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
7333module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
7334module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
7335module_param_named(medium_error_count, sdebug_medium_error_count, int,
7336 S_IRUGO | S_IWUSR);
7337module_param_named(medium_error_start, sdebug_medium_error_start, int,
7338 S_IRUGO | S_IWUSR);
7339module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
7340module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
7341module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
7342module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
7343module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
7344module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
7345module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
7346module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
7347module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
7348module_param_named(per_host_store, sdebug_per_host_store, bool,
7349 S_IRUGO | S_IWUSR);
7350module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
7351module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
7352module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
7353module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
7354module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
7355module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
7356module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
7357module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
7358module_param_named(submit_queues, submit_queues, int, S_IRUGO);
7359module_param_named(poll_queues, poll_queues, int, S_IRUGO);
7360module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
7361module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
7362module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
7363module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
7364module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
7365module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO);
7366module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO);
7367module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO);
7368module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO);
7369module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO);
7370module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
7371module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
7372module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
7373 S_IRUGO | S_IWUSR);
7374module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
7375module_param_named(write_same_length, sdebug_write_same_length, int,
7376 S_IRUGO | S_IWUSR);
7377module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
7378module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
7379module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
7380module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
7381module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
7382module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
7383
7384MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
7385MODULE_DESCRIPTION("SCSI debug adapter driver");
7386MODULE_LICENSE("GPL");
7387MODULE_VERSION(SDEBUG_VERSION);
7388
7389MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
7390MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
7391MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
7392MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
7393MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
7394MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
7395MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
7396MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
7397MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
7398MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
7399MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
7400MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
7401MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
7402MODULE_PARM_DESC(host_max_queue,
7403 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
7404MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
7405MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
7406 SDEBUG_VERSION "\")");
7407MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
7408MODULE_PARM_DESC(lbprz,
7409 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
7410MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
7411MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
7412MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
7413MODULE_PARM_DESC(atomic_write, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)");
7414MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
7415MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
7416MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
7417MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
7418MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
7419MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
7420MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
7421MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
7422MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
7423MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
7424MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
7425MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
7426MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
7427MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
7428MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
7429MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
7430MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
7431MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
7432MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
7433MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
7434MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
7435MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
7436MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
7437MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
7438MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
7439MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
7440MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
7441MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
7442MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
7443MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
7444MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
7445MODULE_PARM_DESC(atomic_wr_max_length, "max # of blocks can be atomically written in one cmd (def=8192)");
7446MODULE_PARM_DESC(atomic_wr_align, "minimum alignment of atomic write in blocks (def=2)");
7447MODULE_PARM_DESC(atomic_wr_gran, "minimum granularity of atomic write in blocks (def=2)");
7448MODULE_PARM_DESC(atomic_wr_max_length_bndry, "max # of blocks can be atomically written in one cmd with boundary set (def=8192)");
7449MODULE_PARM_DESC(atomic_wr_max_bndry, "max # boundaries per atomic write (def=128)");
7450MODULE_PARM_DESC(uuid_ctl,
7451 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
7452MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
7453MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
7454MODULE_PARM_DESC(wp, "Write Protect (def=0)");
7455MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
7456MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
7457MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
7458MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
7459MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
7460MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
7461MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
7462
7463#define SDEBUG_INFO_LEN 256
7464static char sdebug_info[SDEBUG_INFO_LEN];
7465
7466static const char *scsi_debug_info(struct Scsi_Host *shp)
7467{
7468 int k;
7469
7470 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
7471 my_name, SDEBUG_VERSION, sdebug_version_date);
7472 if (k >= (SDEBUG_INFO_LEN - 1))
7473 return sdebug_info;
7474 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
7475 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
7476 sdebug_dev_size_mb, sdebug_opts, submit_queues,
7477 "statistics", (int)sdebug_statistics);
7478 return sdebug_info;
7479}
7480
7481/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
7482static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
7483 int length)
7484{
7485 char arr[16];
7486 int opts;
7487 int minLen = length > 15 ? 15 : length;
7488
7489 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
7490 return -EACCES;
7491 memcpy(arr, buffer, minLen);
7492 arr[minLen] = '\0';
7493 if (1 != sscanf(arr, "%d", &opts))
7494 return -EINVAL;
7495 sdebug_opts = opts;
7496 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
7497 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
7498 if (sdebug_every_nth != 0)
7499 tweak_cmnd_count();
7500 return length;
7501}
7502
/* Context for sdebug_submit_queue_iter() when scanning busy requests. */
struct sdebug_submit_queue_data {
	int *first;	/* out: first busy tag seen on queue_num, -1 if none */
	int *last;	/* out: last busy tag seen on queue_num */
	int queue_num;	/* in: hardware queue index to match */
};
7508
7509static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
7510{
7511 struct sdebug_submit_queue_data *data = opaque;
7512 u32 unique_tag = blk_mq_unique_tag(rq);
7513 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7514 u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
7515 int queue_num = data->queue_num;
7516
7517 if (hwq != queue_num)
7518 return true;
7519
7520 /* Rely on iter'ing in ascending tag order */
7521 if (*data->first == -1)
7522 *data->first = *data->last = tag;
7523 else
7524 *data->last = tag;
7525
7526 return true;
7527}
7528
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct sdebug_host_info *sdhp;
	int j;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf),
		   atomic_read(&sdeb_mq_poll_count));

	/* per hardware queue: range of busy tags (relies on ascending iter) */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0; j < submit_queues; ++j) {
		int f = -1, l = -1;
		struct sdebug_submit_queue_data data = {
			.queue_num = j,
			.first = &f,
			.last = &l,
		};
		seq_printf(m, " queue %d:\n", j);
		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
					&data);
		if (f >= 0) {
			seq_printf(m, " BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	/* map each host to its backing-store index, then list the stores */
	if (!xa_empty(per_store_ap)) {
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, " %d: idx=%d%s\n", j, idx,
				   (niu ? " not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
7608
/* Show the current response delay in jiffies. */
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			struct sdebug_host_info *sdhp;

			/* block all queues so no command is in flight while
			 * the delay is changed */
			mutex_lock(&sdebug_host_list_mutex);
			block_unblock_all_queues(true);

			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				struct Scsi_Host *shost = sdhp->shost;

				if (scsi_host_busy(shost)) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				/* setting jdelay cancels any ndelay override */
				sdebug_jdelay = jdelay;
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
7649
/* Show the current response delay in nanoseconds. */
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	/* accept 0 <= ndelay < 1 second (in nanoseconds) */
	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			struct sdebug_host_info *sdhp;

			/* block all queues so no command is in flight while
			 * the delay is changed */
			mutex_lock(&sdebug_host_list_mutex);
			block_unblock_all_queues(true);

			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				struct Scsi_Host *shost = sdhp->shost;

				if (scsi_host_busy(shost)) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}

			if (res > 0) {
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
						       : DEF_JDELAY;
			}
			block_unblock_all_queues(false);
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
7692
7693static ssize_t opts_show(struct device_driver *ddp, char *buf)
7694{
7695 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
7696}
7697
7698static ssize_t opts_store(struct device_driver *ddp, const char *buf,
7699 size_t count)
7700{
7701 int opts;
7702 char work[20];
7703
7704 if (sscanf(buf, "%10s", work) == 1) {
7705 if (strncasecmp(work, "0x", 2) == 0) {
7706 if (kstrtoint(work + 2, 16, &opts) == 0)
7707 goto opts_done;
7708 } else {
7709 if (kstrtoint(work, 10, &opts) == 0)
7710 goto opts_done;
7711 }
7712 }
7713 return -EINVAL;
7714opts_done:
7715 sdebug_opts = opts;
7716 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
7717 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
7718 tweak_cmnd_count();
7719 return count;
7720}
7721static DRIVER_ATTR_RW(opts);
7722
7723static ssize_t ptype_show(struct device_driver *ddp, char *buf)
7724{
7725 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
7726}
7727static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
7728 size_t count)
7729{
7730 int n;
7731
7732 /* Cannot change from or to TYPE_ZBC with sysfs */
7733 if (sdebug_ptype == TYPE_ZBC)
7734 return -EINVAL;
7735
7736 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7737 if (n == TYPE_ZBC)
7738 return -EINVAL;
7739 sdebug_ptype = n;
7740 return count;
7741 }
7742 return -EINVAL;
7743}
7744static DRIVER_ATTR_RW(ptype);
7745
7746static ssize_t dsense_show(struct device_driver *ddp, char *buf)
7747{
7748 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
7749}
7750static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
7751 size_t count)
7752{
7753 int n;
7754
7755 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7756 sdebug_dsense = n;
7757 return count;
7758 }
7759 return -EINVAL;
7760}
7761static DRIVER_ATTR_RW(dsense);
7762
/* Show whether reads/writes are faked (no data copied to/from a store). */
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
/*
 * Store fake_rw. Transitions matter:
 *   1 -> 0: real reads/writes wanted, so (re)attach a backing store and
 *           point every host at the same store;
 *   0 -> 1: stores no longer needed, erase all but the first.
 */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		/* normalize both values to 0/1 before comparing */
		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				/* reuse the first store; mark it in-use */
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
7810
7811static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
7812{
7813 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
7814}
7815static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
7816 size_t count)
7817{
7818 int n;
7819
7820 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7821 sdebug_no_lun_0 = n;
7822 return count;
7823 }
7824 return -EINVAL;
7825}
7826static DRIVER_ATTR_RW(no_lun_0);
7827
7828static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
7829{
7830 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
7831}
7832static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
7833 size_t count)
7834{
7835 int n;
7836
7837 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7838 sdebug_num_tgts = n;
7839 sdebug_max_tgts_luns();
7840 return count;
7841 }
7842 return -EINVAL;
7843}
7844static DRIVER_ATTR_RW(num_tgts);
7845
/* dev_size_mb: per-store size in MiB; fixed after load, hence read-only */
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);
7851
7852static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
7853{
7854 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
7855}
7856
7857static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
7858 size_t count)
7859{
7860 bool v;
7861
7862 if (kstrtobool(buf, &v))
7863 return -EINVAL;
7864
7865 sdebug_per_host_store = v;
7866 return count;
7867}
7868static DRIVER_ATTR_RW(per_host_store);
7869
/* num_parts: partitions built into each store at init; read-only here */
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
7875
static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
}
/*
 * every_nth: act on every nth command (see fake_timeout()).  Input may be
 * decimal or hex with a (case-insensitive) "0x" prefix; at most 10 chars
 * are consumed.  Statistics are forced on because the trigger relies on
 * the command counter.
 */
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int nth;
	char work[20];

	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &nth) == 0)
				goto every_nth_done;
		} else {
			if (kstrtoint(work, 10, &nth) == 0)
				goto every_nth_done;
		}
	}
	return -EINVAL;

every_nth_done:
	sdebug_every_nth = nth;
	if (nth && !sdebug_statistics) {
		pr_info("every_nth needs statistics=1, set it\n");
		sdebug_statistics = true;
	}
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(every_nth);
7907
/* lun_format: SAM LUN address method (0 peripheral, 1 flat) */
static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
}
static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if (kstrtoint(buf, 0, &n))
		return -EINVAL;
	if (n >= 0) {
		if (n > (int)SAM_LUN_AM_FLAT) {
			pr_warn("only LUN address methods 0 and 1 are supported\n");
			return -EINVAL;
		}
		changed = ((int)sdebug_lun_am != n);
		sdebug_lun_am = n;
		/* raise a LUNS CHANGED unit attention on every device */
		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(lun_format);
7944
/* max_luns: LUNs per target, capped at 256 via this interface */
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		/* tell initiators the reported LUN inventory changed */
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
7983
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	/* rejected outright when host_max_queue pins the depth (see init) */
	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE) &&
	    (sdebug_host_max_queue == 0)) {
		mutex_lock(&sdebug_host_list_mutex);

		/* We may only change sdebug_max_queue when we have no shosts */
		if (list_empty(&sdebug_host_list))
			sdebug_max_queue = n;
		else
			count = -EBUSY;	/* count doubles as the return code */
		mutex_unlock(&sdebug_host_list_mutex);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
8011
/* host_max_queue: when non-zero, forces max_queue (see scsi_debug_init()) */
static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
}
8016
8017static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
8018{
8019 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
8020}
8021
8022static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
8023{
8024 bool v;
8025
8026 if (kstrtobool(buf, &v))
8027 return -EINVAL;
8028
8029 sdebug_no_rwlock = v;
8030 return count;
8031}
8032static DRIVER_ATTR_RW(no_rwlock);
8033
8034/*
8035 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
8036 * in range [0, sdebug_host_max_queue), we can't change it.
8037 */
8038static DRIVER_ATTR_RO(host_max_queue);
8039
/* no_uld: module parameter mirror; fixed after load, hence read-only */
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);
8045
/* scsi_level: SCSI standard level the devices report (5 -> SPC-3 etc.) */
static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
8051
8052static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
8053{
8054 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
8055}
8056static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
8057 size_t count)
8058{
8059 int n;
8060 bool changed;
8061
8062 /* Ignore capacity change for ZBC drives for now */
8063 if (sdeb_zbc_in_use)
8064 return -ENOTSUPP;
8065
8066 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8067 changed = (sdebug_virtual_gb != n);
8068 sdebug_virtual_gb = n;
8069 sdebug_capacity = get_sdebug_capacity();
8070 if (changed) {
8071 struct sdebug_host_info *sdhp;
8072 struct sdebug_dev_info *dp;
8073
8074 mutex_lock(&sdebug_host_list_mutex);
8075 list_for_each_entry(sdhp, &sdebug_host_list,
8076 host_list) {
8077 list_for_each_entry(dp, &sdhp->dev_info_list,
8078 dev_list) {
8079 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
8080 dp->uas_bm);
8081 }
8082 }
8083 mutex_unlock(&sdebug_host_list_mutex);
8084 }
8085 return count;
8086 }
8087 return -EINVAL;
8088}
8089static DRIVER_ATTR_RW(virtual_gb);
8090
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

/*
 * add_host: write +N to add N pseudo hosts, -N to remove N.  With
 * per-host stores active, adding first tries to re-use a store that was
 * previously marked not-in-use.
 * NOTE(review): return values of the add/remove helpers are ignored here;
 * a failed add does not stop the loop or report an error to the writer.
 */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* take the first store marked not-in-use */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
8134
8135static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
8136{
8137 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
8138}
8139static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
8140 size_t count)
8141{
8142 int n;
8143
8144 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8145 sdebug_vpd_use_hostno = n;
8146 return count;
8147 }
8148 return -EINVAL;
8149}
8150static DRIVER_ATTR_RW(vpd_use_hostno);
8151
8152static ssize_t statistics_show(struct device_driver *ddp, char *buf)
8153{
8154 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
8155}
8156static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
8157 size_t count)
8158{
8159 int n;
8160
8161 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
8162 if (n > 0)
8163 sdebug_statistics = true;
8164 else {
8165 clear_queue_stats();
8166 sdebug_statistics = false;
8167 }
8168 return count;
8169 }
8170 return -EINVAL;
8171}
8172static DRIVER_ATTR_RW(statistics);
8173
/* sector_size: logical block size in bytes; fixed after load */
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);
8179
/* submit_queues: number of block-layer submission queues; fixed after load */
static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);
8185
/* The four protection-information parameters below are fixed after load. */

/* dix: host-side (DIX) protection setting */
static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

/* dif: T10 PI protection type (0-3, validated in scsi_debug_init()) */
static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

/* guard: guard tag flavour (0 or 1, validated in scsi_debug_init()) */
static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

/* ato: application tag ownership (0 or 1, validated in scsi_debug_init()) */
static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
8209
/*
 * map: render the provisioning bitmap of store 0 as a bit-range list
 * ("%*pbl", e.g. "0-1,32").  Without LBP everything is reported mapped.
 */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		/* PAGE_SIZE - 1 leaves room for the newline added below */
		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
8231
8232static ssize_t random_show(struct device_driver *ddp, char *buf)
8233{
8234 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
8235}
8236
8237static ssize_t random_store(struct device_driver *ddp, const char *buf,
8238 size_t count)
8239{
8240 bool v;
8241
8242 if (kstrtobool(buf, &v))
8243 return -EINVAL;
8244
8245 sdebug_random = v;
8246 return count;
8247}
8248static DRIVER_ATTR_RW(random);
8249
8250static ssize_t removable_show(struct device_driver *ddp, char *buf)
8251{
8252 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
8253}
8254static ssize_t removable_store(struct device_driver *ddp, const char *buf,
8255 size_t count)
8256{
8257 int n;
8258
8259 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8260 sdebug_removable = (n > 0);
8261 return count;
8262 }
8263 return -EINVAL;
8264}
8265static DRIVER_ATTR_RW(removable);
8266
static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
}
/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_host_lock = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(host_lock);
8284
8285static ssize_t strict_show(struct device_driver *ddp, char *buf)
8286{
8287 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
8288}
8289static ssize_t strict_store(struct device_driver *ddp, const char *buf,
8290 size_t count)
8291{
8292 int n;
8293
8294 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8295 sdebug_strict = (n > 0);
8296 return count;
8297 }
8298 return -EINVAL;
8299}
8300static DRIVER_ATTR_RW(strict);
8301
/* uuid_ctl: module parameter mirror; fixed after load, hence read-only */
static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);
8307
static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
}
/*
 * cdb_len: preferred CDB length.  Any kstrtoint-parsable value is stored
 * (no range check here) and pushed out via all_config_cdb_len().
 */
static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int ret, n;

	ret = kstrtoint(buf, 0, &n);
	if (ret)
		return ret;
	sdebug_cdb_len = n;
	all_config_cdb_len();
	return count;
}
static DRIVER_ATTR_RW(cdb_len);
8325
/*
 * Accepted spellings for the zoned model, indexed by BLK_ZONED_* value.
 * sdeb_zbc_model_str() tries each table in turn; zbc_show() uses table a.
 */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA] = "host-aware",
	[BLK_ZONED_HM] = "host-managed",
};

/* shorter synonyms */
static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA] = "aware",
	[BLK_ZONED_HM] = "managed",
};

/* numeric synonyms */
static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA] = "1",
	[BLK_ZONED_HM] = "2",
};
8343
8344static int sdeb_zbc_model_str(const char *cp)
8345{
8346 int res = sysfs_match_string(zbc_model_strs_a, cp);
8347
8348 if (res < 0) {
8349 res = sysfs_match_string(zbc_model_strs_b, cp);
8350 if (res < 0) {
8351 res = sysfs_match_string(zbc_model_strs_c, cp);
8352 if (res < 0)
8353 return -EINVAL;
8354 }
8355 }
8356 return res;
8357}
8358
/* zbc: current zoned model as its long spelling; fixed after load */
static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);
8365
/* tur_ms_to_ready: module parameter mirror (see resp_not_ready()); RO */
static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);
8371
8372static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
8373{
8374 char *p = buf, *end = buf + PAGE_SIZE;
8375 int i;
8376
8377 for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
8378 p += scnprintf(p, end - p, "%d %ld\n", i,
8379 atomic_long_read(&writes_by_group_number[i]));
8380
8381 return p - buf;
8382}
8383
8384static ssize_t group_number_stats_store(struct device_driver *ddp,
8385 const char *buf, size_t count)
8386{
8387 int i;
8388
8389 for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
8390 atomic_long_set(&writes_by_group_number[i], 0);
8391
8392 return count;
8393}
8394static DRIVER_ATTR_RW(group_number_stats);
8395
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above. */

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	&driver_attr_group_number_stats.attr,
	NULL,	/* sentinel */
};
ATTRIBUTE_GROUPS(sdebug_drv);
8446
/* parent device of all pseudo adapters; registered in scsi_debug_init() */
static struct device *pseudo_primary;
8448
/*
 * Module init: validate the module parameters, derive geometry/capacity,
 * optionally allocate the first backing store, register the pseudo bus,
 * root device and driver, then build the initially requested hosts.
 * Returns 0 on success or a negated errno.
 */
static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	/* a valid ndelay (ns) overrides any jiffies-based delay */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	/* --- parameter validation --- */
	switch (sdebug_sector_size) {
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	/* more than 256 LUNs requires flat addressing */
	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	/* a non-zero host_max_queue pins max_queue to the same value */
	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0)
			return k;
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			return -EINVAL;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	/* --- capacity and geometry --- */
	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}
	/* clamp logical block provisioning parameters into sane ranges */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			return -EINVAL;
		}
	}

	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0)
			return idx;
	}

	/* --- driver-model registration; unwound in reverse on error --- */
	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
		pr_info("%s: failed to create initial debugfs directory\n", __func__);

	/* the first host re-uses the store allocated above, if any */
	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
	return ret;
}
8683
8684static void __exit scsi_debug_exit(void)
8685{
8686 int k = sdebug_num_hosts;
8687
8688 for (; k; k--)
8689 sdebug_do_remove_host(true);
8690 driver_unregister(&sdebug_driverfs_driver);
8691 bus_unregister(&pseudo_lld_bus);
8692 root_device_unregister(pseudo_primary);
8693
8694 sdebug_erase_all_stores(false);
8695 xa_destroy(per_store_ap);
8696 debugfs_remove(sdebug_debugfs_root);
8697}
8698
/* NOTE(review): device_initcall() rather than module_init() - presumably to
 * order built-in initialization after prerequisite subsystems; confirm. */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
8701
/* .release callback for a pseudo adapter device: free its container */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(dev_to_sdebug_host(dev));
}
8709
/*
 * Free one backing store and remove it from the xarray.
 * idx must be valid; if sip is NULL then it will be obtained using idx.
 */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	/* vfree(NULL) is a no-op, so partially built stores are fine */
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	xa_erase(per_store_ap, idx);
	kfree(sip);
}
8728
/*
 * Erase every store, optionally sparing the first one iterated.
 * Assume apart_from_first==false only in shutdown case.
 * Note: the flag is cleared on the first iteration, so the trailing "if"
 * is only reached true when the xarray held no entries at all.
 */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;	/* skip the survivor */
		else
			sdebug_erase_store(idx, sip);
	}
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
8744
8745/*
8746 * Returns store xarray new element index (idx) if >=0 else negated errno.
8747 * Limit the number of stores to 65536.
8748 */
8749static int sdebug_add_store(void)
8750{
8751 int res;
8752 u32 n_idx;
8753 unsigned long iflags;
8754 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
8755 struct sdeb_store_info *sip = NULL;
8756 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
8757
8758 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
8759 if (!sip)
8760 return -ENOMEM;
8761
8762 xa_lock_irqsave(per_store_ap, iflags);
8763 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
8764 if (unlikely(res < 0)) {
8765 xa_unlock_irqrestore(per_store_ap, iflags);
8766 kfree(sip);
8767 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
8768 return res;
8769 }
8770 sdeb_most_recent_idx = n_idx;
8771 if (sdeb_first_idx < 0)
8772 sdeb_first_idx = n_idx;
8773 xa_unlock_irqrestore(per_store_ap, iflags);
8774
8775 res = -ENOMEM;
8776 sip->storep = vzalloc(sz);
8777 if (!sip->storep) {
8778 pr_err("user data oom\n");
8779 goto err;
8780 }
8781 if (sdebug_num_parts > 0)
8782 sdebug_build_parts(sip->storep, sz);
8783
8784 /* DIF/DIX: what T10 calls Protection Information (PI) */
8785 if (sdebug_dix) {
8786 int dif_size;
8787
8788 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
8789 sip->dif_storep = vmalloc(dif_size);
8790
8791 pr_info("dif_storep %u bytes @ %p\n", dif_size,
8792 sip->dif_storep);
8793
8794 if (!sip->dif_storep) {
8795 pr_err("DIX oom\n");
8796 goto err;
8797 }
8798 memset(sip->dif_storep, 0xff, dif_size);
8799 }
8800 /* Logical Block Provisioning */
8801 if (scsi_debug_lbp()) {
8802 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
8803 sip->map_storep = vcalloc(BITS_TO_LONGS(map_size),
8804 sizeof(long));
8805
8806 pr_info("%lu provisioning blocks\n", map_size);
8807
8808 if (!sip->map_storep) {
8809 pr_err("LBP map oom\n");
8810 goto err;
8811 }
8812
8813 /* Map first 1KB for partition table */
8814 if (sdebug_num_parts)
8815 map_region(sip, 0, 2);
8816 }
8817
8818 rwlock_init(&sip->macc_data_lck);
8819 rwlock_init(&sip->macc_meta_lck);
8820 rwlock_init(&sip->macc_sector_lck);
8821 return (int)n_idx;
8822err:
8823 sdebug_erase_store((int)n_idx, sip);
8824 pr_warn("%s: failed, errno=%d\n", __func__, -res);
8825 return res;
8826}
8827
/*
 * Create one pseudo host bound to the store at per_host_idx (or to the
 * first store when per_host_idx < 0), populate its device list and
 * register it with the driver model.  Returns 0 or a negated errno.
 */
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	/* claim the store if it was parked as not-in-use */
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	mutex_lock(&sdebug_host_list_mutex);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		/* back out of the host list before cleaning up */
		mutex_lock(&sdebug_host_list_mutex);
		list_del(&sdbg_host->host_list);
		mutex_unlock(&sdebug_host_list_mutex);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	/*
	 * After a failed device_register() the device must be released via
	 * put_device(); before it, plain kfree() is correct.  .release being
	 * set marks that device_register() was attempted.
	 */
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}
8886
8887static int sdebug_do_add_host(bool mk_new_store)
8888{
8889 int ph_idx = sdeb_most_recent_idx;
8890
8891 if (mk_new_store) {
8892 ph_idx = sdebug_add_store();
8893 if (ph_idx < 0)
8894 return ph_idx;
8895 }
8896 return sdebug_add_host_helper(ph_idx);
8897}
8898
/*
 * Remove the most recently added pseudo host.  Unless the_end (module
 * unload), a store no longer referenced by any remaining host is parked
 * as not-in-use so a later add can re-use it.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	mutex_lock(&sdebug_host_list_mutex);
	if (!list_empty(&sdebug_host_list)) {
		/* last entry == most recently added host */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* does any other host still reference this store? */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
8938
8939static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
8940{
8941 struct sdebug_dev_info *devip = sdev->hostdata;
8942
8943 if (!devip)
8944 return -ENODEV;
8945
8946 mutex_lock(&sdebug_host_list_mutex);
8947 block_unblock_all_queues(true);
8948
8949 if (qdepth > SDEBUG_CANQUEUE) {
8950 qdepth = SDEBUG_CANQUEUE;
8951 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
8952 qdepth, SDEBUG_CANQUEUE);
8953 }
8954 if (qdepth < 1)
8955 qdepth = 1;
8956 if (qdepth != sdev->queue_depth)
8957 scsi_change_queue_depth(sdev, qdepth);
8958
8959 block_unblock_all_queues(false);
8960 mutex_unlock(&sdebug_host_list_mutex);
8961
8962 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
8963 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
8964
8965 return sdev->queue_depth;
8966}
8967
8968static bool fake_timeout(struct scsi_cmnd *scp)
8969{
8970 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
8971 if (sdebug_every_nth < -1)
8972 sdebug_every_nth = -1;
8973 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
8974 return true; /* ignore command causing timeout */
8975 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
8976 scsi_medium_access_command(scp))
8977 return true; /* time out reads and writes */
8978 }
8979 return false;
8980}
8981
/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	/* stopped == 2: device is "in the process of becoming ready" */
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			/* report remaining time-to-ready in the sense data */
			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	/* fully stopped: an initializing command (e.g. START UNIT) is needed */
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
9024
9025static void sdebug_map_queues(struct Scsi_Host *shost)
9026{
9027 int i, qoff;
9028
9029 if (shost->nr_hw_queues == 1)
9030 return;
9031
9032 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
9033 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
9034
9035 map->nr_queues = 0;
9036
9037 if (i == HCTX_TYPE_DEFAULT)
9038 map->nr_queues = submit_queues - poll_queues;
9039 else if (i == HCTX_TYPE_POLL)
9040 map->nr_queues = poll_queues;
9041
9042 if (!map->nr_queues) {
9043 BUG_ON(i == HCTX_TYPE_DEFAULT);
9044 continue;
9045 }
9046
9047 map->queue_offset = qoff;
9048 blk_mq_map_queues(map);
9049
9050 qoff += map->nr_queues;
9051 }
9052}
9053
/* Iteration context handed to sdebug_blk_mq_poll_iter() via blk_mq_tagset_busy_iter() */
struct sdebug_blk_mq_poll_data {
	unsigned int queue_num;	/* hardware queue being polled */
	int *num_entries;	/* out: number of commands completed this poll */
};
9058
9059/*
9060 * We don't handle aborted commands here, but it does not seem possible to have
9061 * aborted polled commands from schedule_resp()
9062 */
9063static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
9064{
9065 struct sdebug_blk_mq_poll_data *data = opaque;
9066 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
9067 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
9068 struct sdebug_defer *sd_dp;
9069 u32 unique_tag = blk_mq_unique_tag(rq);
9070 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
9071 unsigned long flags;
9072 int queue_num = data->queue_num;
9073 ktime_t time;
9074
9075 /* We're only interested in one queue for this iteration */
9076 if (hwq != queue_num)
9077 return true;
9078
9079 /* Subsequent checks would fail if this failed, but check anyway */
9080 if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
9081 return true;
9082
9083 time = ktime_get_boottime();
9084
9085 spin_lock_irqsave(&sdsc->lock, flags);
9086 sd_dp = &sdsc->sd_dp;
9087 if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
9088 spin_unlock_irqrestore(&sdsc->lock, flags);
9089 return true;
9090 }
9091
9092 if (time < sd_dp->cmpl_ts) {
9093 spin_unlock_irqrestore(&sdsc->lock, flags);
9094 return true;
9095 }
9096 spin_unlock_irqrestore(&sdsc->lock, flags);
9097
9098 if (sdebug_statistics) {
9099 atomic_inc(&sdebug_completions);
9100 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
9101 atomic_inc(&sdebug_miss_cpus);
9102 }
9103
9104 scsi_done(cmd); /* callback to mid level */
9105 (*data->num_entries)++;
9106 return true;
9107}
9108
9109static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
9110{
9111 int num_entries = 0;
9112 struct sdebug_blk_mq_poll_data data = {
9113 .queue_num = queue_num,
9114 .num_entries = &num_entries,
9115 };
9116
9117 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
9118 &data);
9119
9120 if (num_entries > 0)
9121 atomic_add(num_entries, &sdeb_mq_poll_count);
9122 return num_entries;
9123}
9124
9125static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
9126{
9127 struct scsi_device *sdp = cmnd->device;
9128 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
9129 struct sdebug_err_inject *err;
9130 unsigned char *cmd = cmnd->cmnd;
9131 int ret = 0;
9132
9133 if (devip == NULL)
9134 return 0;
9135
9136 rcu_read_lock();
9137 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
9138 if (err->type == ERR_TMOUT_CMD &&
9139 (err->cmd == cmd[0] || err->cmd == 0xff)) {
9140 ret = !!err->cnt;
9141 if (err->cnt < 0)
9142 err->cnt++;
9143
9144 rcu_read_unlock();
9145 return ret;
9146 }
9147 }
9148 rcu_read_unlock();
9149
9150 return 0;
9151}
9152
9153static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
9154{
9155 struct scsi_device *sdp = cmnd->device;
9156 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
9157 struct sdebug_err_inject *err;
9158 unsigned char *cmd = cmnd->cmnd;
9159 int ret = 0;
9160
9161 if (devip == NULL)
9162 return 0;
9163
9164 rcu_read_lock();
9165 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
9166 if (err->type == ERR_FAIL_QUEUE_CMD &&
9167 (err->cmd == cmd[0] || err->cmd == 0xff)) {
9168 ret = err->cnt ? err->queuecmd_ret : 0;
9169 if (err->cnt < 0)
9170 err->cnt++;
9171
9172 rcu_read_unlock();
9173 return ret;
9174 }
9175 }
9176 rcu_read_unlock();
9177
9178 return 0;
9179}
9180
9181static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
9182 struct sdebug_err_inject *info)
9183{
9184 struct scsi_device *sdp = cmnd->device;
9185 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
9186 struct sdebug_err_inject *err;
9187 unsigned char *cmd = cmnd->cmnd;
9188 int ret = 0;
9189 int result;
9190
9191 if (devip == NULL)
9192 return 0;
9193
9194 rcu_read_lock();
9195 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
9196 if (err->type == ERR_FAIL_CMD &&
9197 (err->cmd == cmd[0] || err->cmd == 0xff)) {
9198 if (!err->cnt) {
9199 rcu_read_unlock();
9200 return 0;
9201 }
9202
9203 ret = !!err->cnt;
9204 rcu_read_unlock();
9205 goto out_handle;
9206 }
9207 }
9208 rcu_read_unlock();
9209
9210 return 0;
9211
9212out_handle:
9213 if (err->cnt < 0)
9214 err->cnt++;
9215 mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
9216 result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
9217 *info = *err;
9218 *retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
9219
9220 return ret;
9221}
9222
/*
 * queuecommand host-template callback: decode the CDB, apply the configured
 * error injections and option-driven behaviors, pick the resp_* handler for
 * the opcode (and service action, if any) from opcode_info_arr, then hand
 * the command to schedule_resp() with the configured delay.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	u32 devsel = sdebug_get_devsel(scp->device);
	bool has_wlun_rl;
	bool inject_now;
	int ret = 0;
	struct sdebug_err_inject err;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	/* optionally log the CDB bytes (up to 32) */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];	/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}

	/* per-device error injections: timeout, queue failure, cmd failure */
	if (sdebug_timeout_cmd(scp)) {
		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
		return 0;
	}

	ret = sdebug_fail_queue_cmd(scp);
	if (ret) {
		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
				opcode, ret);
		return ret;
	}

	if (sdebug_fail_cmd(scp, &ret, &err)) {
		scmd_printk(KERN_INFO, scp,
			"fail command 0x%x with hostbyte=0x%x, "
			"driverbyte=0x%x, statusbyte=0x%x, "
			"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
			opcode, err.host_byte, err.driver_byte,
			err.status_byte, err.sense_key, err.asc, err.asq);
		return ret;
	}

	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	/* resolve the opcode (plus service action) to a handler entry */
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa &&
					(devsel & oip->devsel) != 0)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode &&
					(devsel & oip->devsel) != 0)
					break;
			}
		}
		if (k > na) {	/* no matching variant found */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* find bit position of highest offending bit */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* report any pending unit attention unless the command skips UA */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;	/* fake_rw: skip the handler, succeed immediately */
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;	/* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
9417
9418static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
9419{
9420 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
9421 struct sdebug_defer *sd_dp = &sdsc->sd_dp;
9422
9423 spin_lock_init(&sdsc->lock);
9424 hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
9425 HRTIMER_MODE_REL_PINNED);
9426 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
9427
9428 return 0;
9429}
9430
/* SCSI mid-layer host template for the scsi_debug pseudo adapter */
static const struct scsi_host_template sdebug_driver_template = {
	.show_info = scsi_debug_show_info,
	.write_info = scsi_debug_write_info,
	.proc_name = sdebug_proc_name,
	.name = "SCSI DEBUG",
	.info = scsi_debug_info,
	.sdev_init = scsi_debug_sdev_init,
	.sdev_configure = scsi_debug_sdev_configure,
	.sdev_destroy = scsi_debug_sdev_destroy,
	.ioctl = scsi_debug_ioctl,
	.queuecommand = scsi_debug_queuecommand,
	.change_queue_depth = sdebug_change_qdepth,
	.map_queues = sdebug_map_queues,
	.mq_poll = sdebug_blk_mq_poll,
	.eh_abort_handler = scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue = SDEBUG_CANQUEUE,
	.this_id = 7,
	.sg_tablesize = SG_MAX_SEGMENTS,
	.cmd_per_lun = DEF_CMD_PER_LUN,
	.max_sectors = -1U,		/* no limit */
	.max_segment_size = -1U,	/* no limit */
	.module = THIS_MODULE,
	.skip_settle_delay = 1,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct sdebug_scsi_cmd),
	.init_cmd_priv = sdebug_init_cmd_priv,
	.target_alloc = sdebug_target_alloc,
	.target_destroy = sdebug_target_destroy,
};
9464
/*
 * Pseudo-bus probe callback: allocate a Scsi_Host for one sdebug adapter
 * device, size its queues from the module parameters (trimming submit/poll
 * queue counts to sane values), configure DIF/DIX protection and guard type,
 * then register the host with the SCSI mid-layer and scan it.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = dev_to_sdebug_host(dev);

	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	hpnt->can_queue = sdebug_max_queue;
	hpnt->cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		hpnt->dma_boundary = PAGE_SIZE - 1;

	/* more submit queues than CPUs is pointless; trim */
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			 my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	}

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O queue
	 * left over for non-polled I/O.
	 * If condition not met, trim poll_queues to 1 (just for simplicity).
	 */
	if (poll_queues >= submit_queues) {
		if (submit_queues < 3)
			pr_warn("%s: trim poll_queues to 1\n", my_name);
		else
			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
				my_name, submit_queues - 1);
		poll_queues = 1;
	}
	if (poll_queues)
		hpnt->nr_maps = 3;

	sdbg_host->shost = hpnt;
	/* leave room for the initiator's own id if it falls in target range */
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* map the sdebug_dif module parameter to SHOST_* protection flags */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
9589
9590static void sdebug_driver_remove(struct device *dev)
9591{
9592 struct sdebug_host_info *sdbg_host;
9593 struct sdebug_dev_info *sdbg_devinfo, *tmp;
9594
9595 sdbg_host = dev_to_sdebug_host(dev);
9596
9597 scsi_remove_host(sdbg_host->shost);
9598
9599 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
9600 dev_list) {
9601 list_del(&sdbg_devinfo->dev_list);
9602 kfree(sdbg_devinfo->zstate);
9603 kfree(sdbg_devinfo);
9604 }
9605
9606 scsi_host_put(sdbg_host->shost);
9607}
9608
/* Pseudo bus connecting sdebug "adapter" devices to this LLD's probe/remove */
static const struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};