Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8 *
9 * Copyright (C) 2001 - 2018 Douglas Gilbert
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 *
18 */
19
20
21#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
22
23#include <linux/module.h>
24
25#include <linux/kernel.h>
26#include <linux/errno.h>
27#include <linux/jiffies.h>
28#include <linux/slab.h>
29#include <linux/types.h>
30#include <linux/string.h>
31#include <linux/genhd.h>
32#include <linux/fs.h>
33#include <linux/init.h>
34#include <linux/proc_fs.h>
35#include <linux/vmalloc.h>
36#include <linux/moduleparam.h>
37#include <linux/scatterlist.h>
38#include <linux/blkdev.h>
39#include <linux/crc-t10dif.h>
40#include <linux/spinlock.h>
41#include <linux/interrupt.h>
42#include <linux/atomic.h>
43#include <linux/hrtimer.h>
44#include <linux/uuid.h>
45#include <linux/t10-pi.h>
46
47#include <net/checksum.h>
48
49#include <asm/unaligned.h>
50
51#include <scsi/scsi.h>
52#include <scsi/scsi_cmnd.h>
53#include <scsi/scsi_device.h>
54#include <scsi/scsi_host.h>
55#include <scsi/scsicam.h>
56#include <scsi/scsi_eh.h>
57#include <scsi/scsi_tcq.h>
58#include <scsi/scsi_dbg.h>
59
60#include "sd.h"
61#include "scsi_logging.h"
62
63/* make sure inq_product_rev string corresponds to this version */
64#define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */
65static const char *sdebug_version_date = "20180128";
66
67#define MY_NAME "scsi_debug"
68
69/* Additional Sense Code (ASC) */
70#define NO_ADDITIONAL_SENSE 0x0
71#define LOGICAL_UNIT_NOT_READY 0x4
72#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
73#define UNRECOVERED_READ_ERR 0x11
74#define PARAMETER_LIST_LENGTH_ERR 0x1a
75#define INVALID_OPCODE 0x20
76#define LBA_OUT_OF_RANGE 0x21
77#define INVALID_FIELD_IN_CDB 0x24
78#define INVALID_FIELD_IN_PARAM_LIST 0x26
79#define UA_RESET_ASC 0x29
80#define UA_CHANGED_ASC 0x2a
81#define TARGET_CHANGED_ASC 0x3f
82#define LUNS_CHANGED_ASCQ 0x0e
83#define INSUFF_RES_ASC 0x55
84#define INSUFF_RES_ASCQ 0x3
85#define POWER_ON_RESET_ASCQ 0x0
86#define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
87#define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88#define CAPACITY_CHANGED_ASCQ 0x9
89#define SAVING_PARAMS_UNSUP 0x39
90#define TRANSPORT_PROBLEM 0x4b
91#define THRESHOLD_EXCEEDED 0x5d
92#define LOW_POWER_COND_ON 0x5e
93#define MISCOMPARE_VERIFY_ASC 0x1d
94#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
95#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96#define WRITE_ERROR_ASC 0xc
97
98/* Additional Sense Code Qualifier (ASCQ) */
99#define ACK_NAK_TO 0x3
100
101/* Default values for driver parameters */
102#define DEF_NUM_HOST 1
103#define DEF_NUM_TGTS 1
104#define DEF_MAX_LUNS 1
105/* With these defaults, this driver will make 1 host with 1 target
106 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
107 */
108#define DEF_ATO 1
109#define DEF_CDB_LEN 10
110#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
111#define DEF_DEV_SIZE_MB 8
112#define DEF_DIF 0
113#define DEF_DIX 0
114#define DEF_D_SENSE 0
115#define DEF_EVERY_NTH 0
116#define DEF_FAKE_RW 0
117#define DEF_GUARD 0
118#define DEF_HOST_LOCK 0
119#define DEF_LBPU 0
120#define DEF_LBPWS 0
121#define DEF_LBPWS10 0
122#define DEF_LBPRZ 1
123#define DEF_LOWEST_ALIGNED 0
124#define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
125#define DEF_NO_LUN_0 0
126#define DEF_NUM_PARTS 0
127#define DEF_OPTS 0
128#define DEF_OPT_BLKS 1024
129#define DEF_PHYSBLK_EXP 0
130#define DEF_OPT_XFERLEN_EXP 0
131#define DEF_PTYPE TYPE_DISK
132#define DEF_REMOVABLE false
133#define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
134#define DEF_SECTOR_SIZE 512
135#define DEF_UNMAP_ALIGNMENT 0
136#define DEF_UNMAP_GRANULARITY 1
137#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
138#define DEF_UNMAP_MAX_DESC 256
139#define DEF_VIRTUAL_GB 0
140#define DEF_VPD_USE_HOSTNO 1
141#define DEF_WRITESAME_LENGTH 0xFFFF
142#define DEF_STRICT 0
143#define DEF_STATISTICS false
144#define DEF_SUBMIT_QUEUES 1
145#define DEF_UUID_CTL 0
146#define JDELAY_OVERRIDDEN -9999
147
148#define SDEBUG_LUN_0_VAL 0
149
150/* bit mask values for sdebug_opts */
151#define SDEBUG_OPT_NOISE 1
152#define SDEBUG_OPT_MEDIUM_ERR 2
153#define SDEBUG_OPT_TIMEOUT 4
154#define SDEBUG_OPT_RECOVERED_ERR 8
155#define SDEBUG_OPT_TRANSPORT_ERR 16
156#define SDEBUG_OPT_DIF_ERR 32
157#define SDEBUG_OPT_DIX_ERR 64
158#define SDEBUG_OPT_MAC_TIMEOUT 128
159#define SDEBUG_OPT_SHORT_TRANSFER 0x100
160#define SDEBUG_OPT_Q_NOISE 0x200
161#define SDEBUG_OPT_ALL_TSF 0x400
162#define SDEBUG_OPT_RARE_TSF 0x800
163#define SDEBUG_OPT_N_WCE 0x1000
164#define SDEBUG_OPT_RESET_NOISE 0x2000
165#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
166#define SDEBUG_OPT_HOST_BUSY 0x8000
167#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
168 SDEBUG_OPT_RESET_NOISE)
169#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
170 SDEBUG_OPT_TRANSPORT_ERR | \
171 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
172 SDEBUG_OPT_SHORT_TRANSFER | \
173 SDEBUG_OPT_HOST_BUSY)
174/* When "every_nth" > 0 then modulo "every_nth" commands:
175 * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
176 * - a RECOVERED_ERROR is simulated on successful read and write
177 * commands if SDEBUG_OPT_RECOVERED_ERR is set.
178 * - a TRANSPORT_ERROR is simulated on successful read and write
179 * commands if SDEBUG_OPT_TRANSPORT_ERR is set.
180 *
181 * When "every_nth" < 0 then after "- every_nth" commands:
182 * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
183 * - a RECOVERED_ERROR is simulated on successful read and write
184 * commands if SDEBUG_OPT_RECOVERED_ERR is set.
185 * - a TRANSPORT_ERROR is simulated on successful read and write
 * commands if SDEBUG_OPT_TRANSPORT_ERR is set.
187 * This will continue on every subsequent command until some other action
188 * occurs (e.g. the user * writing a new value (other than -1 or 1) to
189 * every_nth via sysfs).
190 */
191
192/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
193 * priority order. In the subset implemented here lower numbers have higher
194 * priority. The UA numbers should be a sequence starting from 0 with
195 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
196#define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
197#define SDEBUG_UA_BUS_RESET 1
198#define SDEBUG_UA_MODE_CHANGED 2
199#define SDEBUG_UA_CAPACITY_CHANGED 3
200#define SDEBUG_UA_LUNS_CHANGED 4
201#define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */
202#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
203#define SDEBUG_NUM_UAS 7
204
205/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
206 * sector on read commands: */
207#define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
208#define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
209
210/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
211 * or "peripheral device" addressing (value 0) */
212#define SAM2_LUN_ADDRESS_METHOD 0
213
214/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215 * (for response) per submit queue at one time. Can be reduced by max_queue
216 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219 * but cannot exceed SDEBUG_CANQUEUE .
220 */
221#define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
222#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223#define DEF_CMD_PER_LUN 255
224
225#define F_D_IN 1
226#define F_D_OUT 2
227#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
228#define F_D_UNKN 8
229#define F_RL_WLUN_OK 0x10
230#define F_SKIP_UA 0x20
231#define F_DELAY_OVERR 0x40
232#define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
233#define F_SA_HIGH 0x100 /* as used by variable length cdbs */
234#define F_INV_OP 0x200
235#define F_FAKE_RW 0x400
236#define F_M_ACCESS 0x800 /* media access */
237#define F_LONG_DELAY 0x1000
238
239#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
240#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
241#define FF_SA (F_SA_HIGH | F_SA_LOW)
242
243#define SDEBUG_MAX_PARTS 4
244
245#define SDEBUG_MAX_CMD_LEN 32
246
247
/* Per logical unit (LUN) state; one instance per simulated device, linked
 * into the owning sdebug_host_info's dev_info_list. */
struct sdebug_dev_info {
	struct list_head dev_list;	/* entry in sdbg_host->dev_info_list */
	unsigned int channel;		/* SCSI address: channel */
	unsigned int target;		/* SCSI address: target id */
	u64 lun;			/* SCSI address: logical unit number */
	uuid_t lu_name;			/* logical unit name (UUID) */
	struct sdebug_host_info *sdbg_host;	/* back pointer to owning host */
	/* pending unit attentions; bit positions are the SDEB_UA_* values
	 * (SDEBUG_NUM_UAS bits, which fit in a single long) */
	unsigned long uas_bm[1];
	atomic_t num_in_q;	/* commands from this LUN currently queued */
	atomic_t stopped;	/* non-zero: unit stopped — TODO confirm set by START STOP UNIT */
	bool used;		/* this slot holds a live device */
};
260
/* One simulated host adapter; linked into the global sdebug_host_list. */
struct sdebug_host_info {
	struct list_head host_list;	/* entry in sdebug_host_list */
	struct Scsi_Host *shost;	/* mid-level host this adapter backs */
	struct device dev;		/* embedded device (see to_sdebug_host) */
	struct list_head dev_info_list;	/* sdebug_dev_info instances on host */
};
267
268#define to_sdebug_host(d) \
269 container_of(d, struct sdebug_host_info, dev)
270
271enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
272 SDEB_DEFER_WQ = 2};
273
/* Deferred-completion bookkeeping for one queued command. A response may be
 * deferred via an hrtimer or a workqueue item (see enum sdeb_defer_type). */
struct sdebug_defer {
	struct hrtimer hrt;		/* used when defer_t == SDEB_DEFER_HRT */
	struct execute_work ew;		/* used when defer_t == SDEB_DEFER_WQ */
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int issuing_cpu;	/* cpu that submitted; for sdebug_miss_cpus stat */
	bool init_hrt;		/* hrt has been initialized */
	bool init_wq;		/* ew has been initialized */
	enum sdeb_defer_type defer_t;	/* which deferral mechanism is in use */
};
284
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;	/* deferred completion state, if any */
	struct scsi_cmnd *a_cmnd;	/* the mid-level command occupying this slot */
	/* inj_* flags mark error injection chosen for this command —
	 * presumably mirroring the SDEBUG_OPT_* injection options; confirm
	 * against the code that sets them (outside this view). */
	unsigned int inj_recovered:1;
	unsigned int inj_transport:1;
	unsigned int inj_dif:1;
	unsigned int inj_dix:1;
	unsigned int inj_short:1;
	unsigned int inj_host_busy:1;
};
298
/* One submit queue: up to SDEBUG_CANQUEUE in-flight commands, slot
 * occupancy tracked in in_use_bm, all guarded by qc_lock. */
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* slot-in-use bitmap */
	spinlock_t qc_lock;	/* protects qc_arr and in_use_bm */
	atomic_t blocked;	/* to temporarily stop more being queued */
};
305
306static atomic_t sdebug_cmnd_count; /* number of incoming commands */
307static atomic_t sdebug_completions; /* count of deferred completions */
308static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
309static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
310
/* Describes how one SCSI opcode (or opcode + service action) is decoded and
 * responded to. Instances live in opcode_info_arr and its *_iarr overflow
 * arrays; field order matters for the aggregate initializers there. */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);	/* response fn */
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
322
323/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/* Keep this enum in sync with opcode_ind_arr (which maps cdb[0] to these
 * values) and with opcode_info_arr (which is indexed by them). */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* 10 only */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
	SDEB_I_WRITE_BUFFER = 26,
	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 28,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 29,
	SDEB_I_LAST_ELEMENT = 30,	/* keep this last (previous + 1) */
};
357
358
/* Maps the first cdb byte (0x0 to 0xff) onto a SDEB_I_* index. Entries left
 * at 0 decode to SDEB_I_INVALID_OPCODE (which is 0 in the enum above). */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	    SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
401
402/*
403 * The following "response" functions return the SCSI mid-level's 4 byte
404 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
405 * command completion, they can mask their return value with
406 * SDEG_RES_IMMED_MASK .
407 */
408#define SDEG_RES_IMMED_MASK 0x40000000
409
410static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
411static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
412static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
413static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
414static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
415static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
416static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
417static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
418static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
419static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
420static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
421static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
422static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
423static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
424static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
425static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
426static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
427static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
428static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
429static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
430static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
431static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
432static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
433
434/*
435 * The following are overflow arrays for cdbs that "hit" the same index in
436 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
437 * should be placed in opcode_info_arr[], the others should be placed here.
438 */
/* overflow entries for SDEB_I_MODE_SENSE (primary entry is MODE SENSE(10)) */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,	/* MODE SENSE(6) */
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
443
/* overflow entries for SDEB_I_MODE_SELECT (primary entry is MODE SELECT(10)) */
static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,	/* MODE SELECT(6) */
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
448
/* overflow entries for SDEB_I_READ (primary entry is READ(16)) */
static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};
459
/* overflow entries for SDEB_I_WRITE (primary entry is WRITE(16)) */
static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};
471
/* overflow entries for SDEB_I_SERV_ACT_IN_16 (primary is READ CAPACITY(16)) */
static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};
477
/* overflow entries for SDEB_I_VARIABLE_LEN (primary entry is READ(32)) */
static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};
486
/* overflow entries for SDEB_I_MAINT_IN (primary is REPORT TARGET PORT GRPS) */
static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
495
/* overflow entries for SDEB_I_WRITE_SAME (primary entry is WRITE SAME(10)) */
static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};
501
/* overflow entries for SDEB_I_RESERVE (primary entry is RESERVE(10)) */
static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
506
/* overflow entries for SDEB_I_RELEASE (primary entry is RELEASE(10)) */
static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
511
/* overflow entries for SDEB_I_SYNC_CACHE (primary is SYNCHRONIZE CACHE(10)) */
static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_LONG_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};
517
518
519/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
520 * plus the terminating elements for logic that scans this table such as
521 * REPORT SUPPORTED OPERATION CODES. */
522static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
523/* 0 */
524 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
525 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
526 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
527 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
528 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
529 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
530 0, 0} }, /* REPORT LUNS */
531 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
532 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
533 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
534 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
535/* 5 */
536 {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
537 resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
538 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
539 {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
540 resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
541 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
542 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
543 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
544 0, 0, 0} },
545 {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
546 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
547 0, 0} },
548 {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
549 resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
550 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
551/* 10 */
552 {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
553 resp_write_dt0, write_iarr, /* WRITE(16) */
554 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
555 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
556 {0, 0x1b, 0, F_LONG_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
557 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
558 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
559 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
560 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
561 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
562 {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
563 NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
564 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
565 {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
566 resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
567 maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
568 0xff, 0, 0xc7, 0, 0, 0, 0} },
569/* 15 */
570 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
571 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
572 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
573 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
574 0, 0, 0, 0, 0, 0} },
575 {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
576 resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
577 {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
578 0xff, 0xff} },
579 {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
580 NULL, reserve_iarr, /* RESERVE(10) <no response function> */
581 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
582 0} },
583 {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
584 NULL, release_iarr, /* RELEASE(10) <no response function> */
585 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
586 0} },
587/* 20 */
588 {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
589 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
590 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
591 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
592 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
593 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
594 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
595 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
596 {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
597 {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
598/* 25 */
599 {0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
600 NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
601 0, 0, 0, 0, 0, 0} }, /* XDWRITEREAD(10) */
602 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
603 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
604 0, 0, 0, 0} }, /* WRITE_BUFFER */
605 {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
606 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
607 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
608 0, 0, 0, 0, 0} },
609 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_LONG_DELAY | F_M_ACCESS,
610 resp_sync_cache, sync_cache_iarr,
611 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
612 0, 0, 0, 0} }, /* SYNC_CACHE (10) */
613 {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
614 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
615 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
616
617/* 30 */
618 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
619 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
620};
621
/* Driver parameter state, initialised from the DEF_* defaults above;
 * presumably exposed as module parameters / sysfs attributes further down
 * the file — confirm against the module_param section (outside this view). */
static int sdebug_add_host = DEF_NUM_HOST;
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool sdebug_statistics = DEF_STATISTICS;

/* derived geometry of the simulated store */
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

/* all simulated hosts, guarded by sdebug_host_list_lock */
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

/* backing storage for the simulated devices */
static unsigned char *fake_storep;	/* ramdisk storage */
static struct t10_pi_tuple *dif_storep;	/* protection info */
static void *map_storep;	/* provisioning map */

static unsigned long map_size;
/* counters, presumably for statistics/proc reporting — confirm at use sites */
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* pre-built SCSI mid-level result codes returned by response functions */
static const int check_condition_result =
	(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
721
722
723/* Only do the extra work involved in logical block provisioning if one or
724 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
725 * real reads and writes (i.e. not skipping them for speed).
726 */
727static inline bool scsi_debug_lbp(void)
728{
729 return 0 == sdebug_fake_rw &&
730 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
731}
732
/* Return a pointer into the ramdisk (fake_storep) for the given LBA,
 * wrapping modulo sdebug_store_sectors. Note: do_div() divides its first
 * argument in place and RETURNS the remainder, so the assignment leaves
 * lba == original lba % sdebug_store_sectors. */
static void *fake_store(unsigned long long lba)
{
	lba = do_div(lba, sdebug_store_sectors);

	return fake_storep + lba * sdebug_sector_size;
}
739
/* Return the protection-information tuple (dif_storep) for the given sector,
 * wrapping modulo sdebug_store_sectors; sector_div() divides in place and
 * returns the remainder, mirroring fake_store() above. */
static struct t10_pi_tuple *dif_store(sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return dif_storep + sector;
}
746
/* Push the current sdebug_num_tgts value into every registered host's
 * max_id (skipping over the host's own initiator id when it would collide)
 * and set max_lun high enough to include the REPORT LUNS well-known LUN.
 * Walks sdebug_host_list under sdebug_host_list_lock. */
static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}
765
766enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
767
/* Build an ILLEGAL REQUEST sense response with a Sense Key Specific (SKS)
 * "field pointer" identifying the offending byte/bit. c_d selects the ASC:
 * SDEB_IN_CDB -> INVALID FIELD IN CDB, SDEB_IN_DATA -> INVALID FIELD IN
 * PARAMETER LIST. Set in_bit to -1 to indicate no bit position of invalid
 * field. Supports both descriptor (sdebug_dsense) and fixed sense formats;
 * the byte offsets written below follow SPC sense data layout. */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense key specific bytes */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;			/* SKSV: sense key specific valid */
	if (c_d)
		sks[0] |= 0x40;		/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;		/* BPV: bit pointer valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);	/* field pointer */
	if (sdebug_dsense) {
		/* append an SKS descriptor (type 0x2, add. length 0x6) */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format SKS bytes */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
808
809static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
810{
811 unsigned char *sbuff;
812
813 sbuff = scp->sense_buffer;
814 if (!sbuff) {
815 sdev_printk(KERN_ERR, scp->device,
816 "%s: sense_buffer is NULL\n", __func__);
817 return;
818 }
819 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
820
821 scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
822
823 if (sdebug_verbose)
824 sdev_printk(KERN_INFO, scp->device,
825 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
826 my_name, key, asc, asq);
827}
828
/* Report CHECK CONDITION: ILLEGAL REQUEST, INVALID COMMAND OPERATION CODE */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
833
834static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
835{
836 if (sdebug_verbose) {
837 if (0x1261 == cmd)
838 sdev_printk(KERN_INFO, dev,
839 "%s: BLKFLSBUF [0x1261]\n", __func__);
840 else if (0x5331 == cmd)
841 sdev_printk(KERN_INFO, dev,
842 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
843 __func__);
844 else
845 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
846 __func__, cmd);
847 }
848 return -EINVAL;
849 /* return -ENOTTY; // correct return but upsets fdisk */
850}
851
852static void config_cdb_len(struct scsi_device *sdev)
853{
854 switch (sdebug_cdb_len) {
855 case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
856 sdev->use_10_for_rw = false;
857 sdev->use_16_for_rw = false;
858 sdev->use_10_for_ms = false;
859 break;
860 case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
861 sdev->use_10_for_rw = true;
862 sdev->use_16_for_rw = false;
863 sdev->use_10_for_ms = false;
864 break;
865 case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
866 sdev->use_10_for_rw = true;
867 sdev->use_16_for_rw = false;
868 sdev->use_10_for_ms = true;
869 break;
870 case 16:
871 sdev->use_10_for_rw = false;
872 sdev->use_16_for_rw = true;
873 sdev->use_10_for_ms = true;
874 break;
875 case 32: /* No knobs to suggest this so same as 16 for now */
876 sdev->use_10_for_rw = false;
877 sdev->use_16_for_rw = true;
878 sdev->use_10_for_ms = true;
879 break;
880 default:
881 pr_warn("unexpected cdb_len=%d, force to 10\n",
882 sdebug_cdb_len);
883 sdev->use_10_for_rw = true;
884 sdev->use_16_for_rw = false;
885 sdev->use_10_for_ms = false;
886 sdebug_cdb_len = 10;
887 break;
888 }
889}
890
/*
 * Re-apply the current cdb_len suggestion to every scsi_device on every
 * emulated host; called after the cdb_len parameter is changed at runtime.
 */
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
906
/*
 * Clear the LUNS_CHANGED unit attention on every LUN that shares
 * @devip's host and target (SPC-4 reports the UA only once per target).
 */
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
922
/*
 * If a unit attention (UA) condition is pending on @devip, build the
 * matching UNIT ATTENTION sense data in @scp, clear that UA's bit and
 * return check_condition_result; otherwise return 0.  Only the
 * lowest-numbered pending UA is reported per call.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {	/* else: no UA pending */
		const char *cp = NULL;	/* description for verbose log */

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1002
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
/*
 * Copies up to @arr_len bytes from @arr into the command's scatter-gather
 * list and updates the residual count.  Fails if the command has no
 * data-in phase.
 */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	if (!sdb->length)
		return 0;	/* zero-length data-in buffer: nothing to do */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;	/* command has no data-in phase */

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	sdb->resid = scsi_bufflen(scp) - act_len;

	return 0;
}
1021
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	int act_len, n;
	struct scsi_data_buffer *sdb = scsi_in(scp);
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;	/* destination offset beyond buffer end */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;	/* command has no data-in phase */

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
	/* resid shrinks to the smallest value seen over all partial writes */
	n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
	sdb->resid = min(sdb->resid, n);
	return 0;
}
1047
1048/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1049 * 'arr' or -1 if error.
1050 */
1051static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1052 int arr_len)
1053{
1054 if (!scsi_bufflen(scp))
1055 return 0;
1056 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
1057 return -1;
1058
1059 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1060}
1061
1062
/* INQUIRY strings; copied into fixed-width fields (8/16/4 bytes) below */
static char sdebug_inq_vendor_id[9] = "Linux ";
static char sdebug_inq_product_id[17] = "scsi_debug ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1070
/* Device identification VPD page. Returns number of bytes placed in arr */
/*
 * Emits, in order: a T10 vendor-id designator, a logical unit designator
 * (UUID or NAA-3, omitted for well-known LUNs where dev_id_num < 0), a
 * relative target port designator, target port / port group / target
 * device NAA-3 designators, and a SCSI name string.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* pad name string to 24 bytes */
	num += 4;
	return num;
}
1158
/* Canned identifiers returned by the page 0x84 response below */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
1171
/*
 * Append one network services descriptor for the page 0x85 response:
 * the association/service-type byte, two reserved bytes, a length byte,
 * then the URL NUL-terminated and zero-padded to a multiple of 4 bytes.
 * Returns the number of bytes written to @arr.
 */
static int inquiry_vpd_85_descr(unsigned char *arr, unsigned char assoc_st,
				const char *url)
{
	int num = 0;
	int olen = strlen(url);
	int plen = olen + 1;		/* include NUL terminator */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* round up to 4-byte multiple */
	arr[num++] = assoc_st;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, url, olen);
	memset(arr + num + olen, 0, plen - olen);
	return num + plen;
}

/* Management network addresses VPD page.  Returns bytes placed in arr. */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* 0x1: association=lu, service type=storage configuration */
	num += inquiry_vpd_85_descr(arr + num, 0x1,
				    "https://www.kernel.org/config");
	/* 0x4: association=lu, service type=logging */
	num += inquiry_vpd_85_descr(arr + num, 0x4,
				    "http://www.kernel.org/log");

	return num;
}
1206
/* SCSI ports VPD page */
/*
 * Reports two relative target ports (A is primary, B is a fake second
 * port), each with an NAA-3 target port identifier descriptor.
 * Returns the number of bytes placed in arr.
 */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);	/* initiator port id length/field: 0 */
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);	/* initiator port id length/field: 0 */
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
1248
1249
/* Canned ATA IDENTIFY-style payload for the ATA Information VPD page */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
1300
1301
/* Block limits VPD page template; dynamic fields are overwritten below */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1308
1309/* Block limits VPD page (SBC-3) */
1310static int inquiry_vpd_b0(unsigned char *arr)
1311{
1312 unsigned int gran;
1313
1314 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1315
1316 /* Optimal transfer length granularity */
1317 if (sdebug_opt_xferlen_exp != 0 &&
1318 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1319 gran = 1 << sdebug_opt_xferlen_exp;
1320 else
1321 gran = 1 << sdebug_physblk_exp;
1322 put_unaligned_be16(gran, arr + 2);
1323
1324 /* Maximum Transfer Length */
1325 if (sdebug_store_sectors > 0x400)
1326 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1327
1328 /* Optimal Transfer Length */
1329 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1330
1331 if (sdebug_lbpu) {
1332 /* Maximum Unmap LBA Count */
1333 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1334
1335 /* Maximum Unmap Block Descriptor Count */
1336 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1337 }
1338
1339 /* Unmap Granularity Alignment */
1340 if (sdebug_unmap_alignment) {
1341 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1342 arr[28] |= 0x80; /* UGAVALID */
1343 }
1344
1345 /* Optimal Unmap Granularity */
1346 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1347
1348 /* Maximum WRITE SAME Length */
1349 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1350
1351 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1352
1353 return sizeof(vpdb0_data);
1354}
1355
/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(unsigned char *arr)
{
	static const unsigned char pg[0x3c] = {
		0,	/* medium rotation rate (MSB) */
		1,	/* non rotating medium (e.g. solid state) */
		0,	/* reserved */
		5,	/* nominal form factor: less than 1.8" */
		/* remainder zero/reserved */
	};

	memcpy(arr, pg, sizeof(pg));
	return sizeof(pg);	/* 0x3c */
}
1367
1368/* Logical block provisioning VPD page (SBC-4) */
1369static int inquiry_vpd_b2(unsigned char *arr)
1370{
1371 memset(arr, 0, 0x4);
1372 arr[0] = 0; /* threshold exponent */
1373 if (sdebug_lbpu)
1374 arr[1] = 1 << 7;
1375 if (sdebug_lbpws)
1376 arr[1] |= 1 << 6;
1377 if (sdebug_lbpws10)
1378 arr[1] |= 1 << 5;
1379 if (sdebug_lbprz && scsi_debug_lbp())
1380 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1381 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1382 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1383 /* threshold_percentage=0 */
1384 return 0x4;
1385}
1386
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

/*
 * Respond to INQUIRY: either standard inquiry data or, when the EVPD bit
 * is set, one of the supported Vital Product Data pages.  Returns 0 on
 * success, check_condition_result on an invalid CDB, or DID_REQUEUE << 16
 * if the response buffer cannot be allocated.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk) {	  /* SBC only */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				arr[n++] = 0xb2;  /* Logical Block Prov */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b1(&arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
					   min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1540
/* Informational exceptions control mode page (0x1c); byte 2 bit 2 is
 * TEST, low nibble of byte 3 is MRIE - consulted by resp_requests() */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1543
1544static int resp_requests(struct scsi_cmnd *scp,
1545 struct sdebug_dev_info *devip)
1546{
1547 unsigned char *sbuff;
1548 unsigned char *cmd = scp->cmnd;
1549 unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1550 bool dsense;
1551 int len = 18;
1552
1553 memset(arr, 0, sizeof(arr));
1554 dsense = !!(cmd[1] & 1);
1555 sbuff = scp->sense_buffer;
1556 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1557 if (dsense) {
1558 arr[0] = 0x72;
1559 arr[1] = 0x0; /* NO_SENSE in sense_key */
1560 arr[2] = THRESHOLD_EXCEEDED;
1561 arr[3] = 0xff; /* TEST set and MRIE==6 */
1562 len = 8;
1563 } else {
1564 arr[0] = 0x70;
1565 arr[2] = 0x0; /* NO_SENSE in sense_key */
1566 arr[7] = 0xa; /* 18 byte sense buffer */
1567 arr[12] = THRESHOLD_EXCEEDED;
1568 arr[13] = 0xff; /* TEST set and MRIE==6 */
1569 }
1570 } else {
1571 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1572 if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1573 ; /* have sense and formats match */
1574 else if (arr[0] <= 0x70) {
1575 if (dsense) {
1576 memset(arr, 0, 8);
1577 arr[0] = 0x72;
1578 len = 8;
1579 } else {
1580 memset(arr, 0, 18);
1581 arr[0] = 0x70;
1582 arr[7] = 0xa;
1583 }
1584 } else if (dsense) {
1585 memset(arr, 0, 8);
1586 arr[0] = 0x72;
1587 arr[1] = sbuff[2]; /* sense key */
1588 arr[2] = sbuff[12]; /* asc */
1589 arr[3] = sbuff[13]; /* ascq */
1590 len = 8;
1591 } else {
1592 memset(arr, 0, 18);
1593 arr[0] = 0x70;
1594 arr[2] = sbuff[1];
1595 arr[7] = 0xa;
1596 arr[12] = sbuff[1];
1597 arr[13] = sbuff[3];
1598 }
1599
1600 }
1601 mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1602 return fill_from_dev_buffer(scp, arr, len);
1603}
1604
/*
 * Respond to START STOP UNIT.  Only power condition 0 is accepted; the
 * START bit toggles the device's stopped state.  With the IMMED bit set
 * the result is flagged so the response is not delayed.
 */
static int resp_start_stop(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, stop;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {	/* only POWER CONDITION 0 supported */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	stop = !(cmd[4] & 1);	/* START bit clear -> stop the unit */
	atomic_xchg(&devip->stopped, stop);
	return (cmd[1] & 0x1) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */
}
1620
1621static sector_t get_sdebug_capacity(void)
1622{
1623 static const unsigned int gibibyte = 1073741824;
1624
1625 if (sdebug_virtual_gb > 0)
1626 return (sector_t)sdebug_virtual_gb *
1627 (gibibyte / sdebug_sector_size);
1628 else
1629 return sdebug_store_sectors;
1630}
1631
#define SDEBUG_READCAP_ARR_SZ 8
/*
 * Respond to READ CAPACITY(10): last LBA (or 0xffffffff when the device
 * is too large for 32 bits, prompting READ CAPACITY(16)) and block size.
 */
static int resp_readcap(struct scsi_cmnd *scp,
			struct sdebug_dev_info *devip)
{
	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
	unsigned int capac;

	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
	if (sdebug_capacity < 0xffffffff) {
		capac = (unsigned int)sdebug_capacity - 1;
		put_unaligned_be32(capac, arr + 0);
	} else
		put_unaligned_be32(0xffffffff, arr + 0);
	put_unaligned_be16(sdebug_sector_size, arr + 6);
	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}
1650
#define SDEBUG_READCAP16_ARR_SZ 32
/*
 * Respond to READ CAPACITY(16): 64-bit last LBA, block size, physical
 * block exponent, alignment, provisioning (LBPME/LBPRZ) and protection
 * (P_TYPE/PROT_EN) fields.
 */
static int resp_readcap16(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	int alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
		 * in the wider field maps to 0 in this field.
		 */
		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
			arr[14] |= 0x40;
	}

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (have_dif_prot) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
1688
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

/*
 * Respond to REPORT TARGET PORT GROUPS.  Returns DID_REQUEUE << 16 if
 * the response buffer cannot be allocated.
 */
static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int n, ret, alen, rlen;
	int port_group_a, port_group_b, port_a, port_b;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;	/* RETURN DATA LENGTH excludes its own 4 bytes */
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen,n);
	ret = fill_from_dev_buffer(scp, arr,
				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
1766
/*
 * Respond to REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN, service
 * action 0xc). REPORTING OPTIONS (cdb byte 2, bits 0-2) selects either a
 * list of all supported commands (0) or a single command descriptor
 * (1..3); responses are driven by the opcode_info_arr[] table.
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;	/* Return Commands Timeout Descriptor requested */
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	/* internal response buffer is capped at 8 KiB (plus slack below) */
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* each descriptor is 8 bytes; 20 with timeout descriptors */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			/* then the attached (same opcode, other sa) entries */
			r_oip = oip;
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;	/* restore the outer loop cursor */
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* command not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported as standard */
			else if (0 == (FF_SA & oip->flags)) {
				/* search attached entries by opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search attached entries by service action */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* emit the cdb usage bitmap */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	/* clamp first to the buffer, then to the cdb's ALLOCATION LENGTH */
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
1917
1918static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1919 struct sdebug_dev_info *devip)
1920{
1921 bool repd;
1922 u32 alloc_len, len;
1923 u8 arr[16];
1924 u8 *cmd = scp->cmnd;
1925
1926 memset(arr, 0, sizeof(arr));
1927 repd = !!(cmd[2] & 0x80);
1928 alloc_len = get_unaligned_be32(cmd + 6);
1929 if (alloc_len < 4) {
1930 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1931 return check_condition_result;
1932 }
1933 arr[0] = 0xc8; /* ATS | ATSS | LURS */
1934 arr[1] = 0x1; /* ITNRS */
1935 if (repd) {
1936 arr[3] = 0xc;
1937 len = 16;
1938 } else
1939 len = 4;
1940
1941 len = (len < alloc_len) ? len : alloc_len;
1942 return fill_from_dev_buffer(scp, arr, len);
1943}
1944
1945/* <<Following mode page info copied from ST318451LW>> */
1946
1947static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
1948{ /* Read-Write Error Recovery page for mode_sense */
1949 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1950 5, 0, 0xff, 0xff};
1951
1952 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1953 if (1 == pcontrol)
1954 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1955 return sizeof(err_recov_pg);
1956}
1957
/* Disconnect-Reconnect mode page (0x2) for MODE SENSE */
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{
	static const unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128,
						      0, 10, 0, 0, 0, 0,
						      0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (pcontrol == 1)	/* changeable values: report all zeroes */
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
1968
1969static int resp_format_pg(unsigned char *p, int pcontrol, int target)
1970{ /* Format device page for mode_sense */
1971 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1972 0, 0, 0, 0, 0, 0, 0, 0,
1973 0, 0, 0, 0, 0x40, 0, 0, 0};
1974
1975 memcpy(p, format_pg, sizeof(format_pg));
1976 put_unaligned_be16(sdebug_sectors_per, p + 10);
1977 put_unaligned_be16(sdebug_sector_size, p + 12);
1978 if (sdebug_removable)
1979 p[20] |= 0x20; /* should agree with INQUIRY */
1980 if (1 == pcontrol)
1981 memset(p + 2, 0, sizeof(format_pg) - 2);
1982 return sizeof(format_pg);
1983}
1984
/* Current values of the Caching mode page (0x8); byte 2 bit 2 is WCE.
 * Shared mutable state: resp_mode_select() also writes into this array. */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};

static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
{	/* Caching page for mode_sense */
	/* changeable values mask (page header bytes elided) */
	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
	    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	/* default values */
	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
	    0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};

	if (SDEBUG_OPT_N_WCE & sdebug_opts)
		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
	memcpy(p, caching_pg, sizeof(caching_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
	else if (2 == pcontrol)
		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
	return sizeof(caching_pg);
}
2005
/* Current values of the Control mode page (0xa); byte 2 bit 2 is D_SENSE,
 * byte 5 bit 7 is ATO. Shared mutable state, also written by
 * resp_mode_select(). */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};

static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Control mode page for mode_sense */
	/* changeable values mask (page header bytes elided) */
	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
					0, 0, 0, 0};
	/* default values */
	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				       0, 0, 0x2, 0x4b};

	/* mirror the current dsense/ato module settings into the page */
	if (sdebug_dsense)
		ctrl_m_pg[2] |= 0x4;
	else
		ctrl_m_pg[2] &= ~0x4;

	if (sdebug_ato)
		ctrl_m_pg[5] |= 0x80; /* ATO=1 */

	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
	return sizeof(ctrl_m_pg);
}
2031
2032
2033static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2034{ /* Informational Exceptions control mode page for mode_sense */
2035 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2036 0, 0, 0x0, 0x0};
2037 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2038 0, 0, 0x0, 0x0};
2039
2040 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2041 if (1 == pcontrol)
2042 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2043 else if (2 == pcontrol)
2044 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2045 return sizeof(iec_m_pg);
2046}
2047
/* SAS SSP mode page (0x19), short format, for MODE SENSE */
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{
	static const unsigned char sas_sf_m_pg[] = {0x19, 0x6, 0x6, 0x0,
						    0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (pcontrol == 1)	/* changeable values: report all zeroes */
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
2058
2059
2060static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2061 int target_dev_id)
2062{ /* SAS phy control and discover mode page for mode_sense */
2063 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2064 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2065 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2066 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2067 0x2, 0, 0, 0, 0, 0, 0, 0,
2068 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2069 0, 0, 0, 0, 0, 0, 0, 0,
2070 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2071 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2072 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2073 0x3, 0, 0, 0, 0, 0, 0, 0,
2074 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2075 0, 0, 0, 0, 0, 0, 0, 0,
2076 };
2077 int port_a, port_b;
2078
2079 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2080 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2081 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2082 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2083 port_a = target_dev_id + 1;
2084 port_b = port_a + 1;
2085 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2086 put_unaligned_be32(port_a, p + 20);
2087 put_unaligned_be32(port_b, p + 48 + 20);
2088 if (1 == pcontrol)
2089 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2090 return sizeof(sas_pcd_m_pg);
2091}
2092
/* SAS SSP shared protocol specific port mode subpage (0x19, 0x2) */
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{
	static const unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0,
						     0x6, 0x10, 0, 0, 0, 0,
						     0, 0, 0, 0, 0,
	};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (pcontrol == 1)	/* changeable values: zero all but header */
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
2104
2105#define SDEBUG_MAX_MSENSE_SZ 256
2106
/*
 * Respond to MODE SENSE(6) and MODE SENSE(10). Builds the mode parameter
 * header, an optional block descriptor (disks only) and the requested
 * mode page(s) into arr[], then returns up to the cdb's allocation length.
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	/* 0=current, 1=changeable, 2=default, 3=saved */
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	/* LLBAA (16 byte descriptors) only exists in MODE SENSE(10) */
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
	is_disk = (sdebug_ptype == TYPE_DISK);
	if (is_disk && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk)
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
	else
		dev_spec = 0x0;
	/* mode parameter header: 4 bytes for MODE SENSE(6), 8 for (10) */
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	/* append the block descriptor (capacity + block length) */
	if (8 == bd_len) {
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	/* only the SAS page (0x19) has subpages in this simulation */
	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* MODE DATA LENGTH excludes itself (1 byte in (6), 2 in (10)) */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
}
2264
2265#define SDEBUG_MAX_MSELECT_SZ 512
2266
2267static int resp_mode_select(struct scsi_cmnd *scp,
2268 struct sdebug_dev_info *devip)
2269{
2270 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2271 int param_len, res, mpage;
2272 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2273 unsigned char *cmd = scp->cmnd;
2274 int mselect6 = (MODE_SELECT == cmd[0]);
2275
2276 memset(arr, 0, sizeof(arr));
2277 pf = cmd[1] & 0x10;
2278 sp = cmd[1] & 0x1;
2279 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2280 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2281 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2282 return check_condition_result;
2283 }
2284 res = fetch_to_dev_buffer(scp, arr, param_len);
2285 if (-1 == res)
2286 return DID_ERROR << 16;
2287 else if (sdebug_verbose && (res < param_len))
2288 sdev_printk(KERN_INFO, scp->device,
2289 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2290 __func__, param_len, res);
2291 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2292 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2293 if (md_len > 2) {
2294 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2295 return check_condition_result;
2296 }
2297 off = bd_len + (mselect6 ? 4 : 8);
2298 mpage = arr[off] & 0x3f;
2299 ps = !!(arr[off] & 0x80);
2300 if (ps) {
2301 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2302 return check_condition_result;
2303 }
2304 spf = !!(arr[off] & 0x40);
2305 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2306 (arr[off + 1] + 2);
2307 if ((pg_len + off) > param_len) {
2308 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2309 PARAMETER_LIST_LENGTH_ERR, 0);
2310 return check_condition_result;
2311 }
2312 switch (mpage) {
2313 case 0x8: /* Caching Mode page */
2314 if (caching_pg[1] == arr[off + 1]) {
2315 memcpy(caching_pg + 2, arr + off + 2,
2316 sizeof(caching_pg) - 2);
2317 goto set_mode_changed_ua;
2318 }
2319 break;
2320 case 0xa: /* Control Mode page */
2321 if (ctrl_m_pg[1] == arr[off + 1]) {
2322 memcpy(ctrl_m_pg + 2, arr + off + 2,
2323 sizeof(ctrl_m_pg) - 2);
2324 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2325 goto set_mode_changed_ua;
2326 }
2327 break;
2328 case 0x1c: /* Informational Exceptions Mode page */
2329 if (iec_m_pg[1] == arr[off + 1]) {
2330 memcpy(iec_m_pg + 2, arr + off + 2,
2331 sizeof(iec_m_pg) - 2);
2332 goto set_mode_changed_ua;
2333 }
2334 break;
2335 default:
2336 break;
2337 }
2338 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2339 return check_condition_result;
2340set_mode_changed_ua:
2341 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2342 return 0;
2343}
2344
/* Temperature log page (0xd) body: current (38 C) and reference (65 C) */
static int resp_temp_l_pg(unsigned char *arr)
{
	static const unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
						  0x0, 0x1, 0x3, 0x2, 0x0, 65,
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2354
2355static int resp_ie_l_pg(unsigned char *arr)
2356{
2357 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2358 };
2359
2360 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2361 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2362 arr[4] = THRESHOLD_EXCEEDED;
2363 arr[5] = 0xff;
2364 }
2365 return sizeof(ie_l_pg);
2366}
2367
2368#define SDEBUG_MAX_LSENSE_SZ 512
2369
2370static int resp_log_sense(struct scsi_cmnd *scp,
2371 struct sdebug_dev_info *devip)
2372{
2373 int ppc, sp, pcode, subpcode, alloc_len, len, n;
2374 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2375 unsigned char *cmd = scp->cmnd;
2376
2377 memset(arr, 0, sizeof(arr));
2378 ppc = cmd[1] & 0x2;
2379 sp = cmd[1] & 0x1;
2380 if (ppc || sp) {
2381 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2382 return check_condition_result;
2383 }
2384 pcode = cmd[2] & 0x3f;
2385 subpcode = cmd[3] & 0xff;
2386 alloc_len = get_unaligned_be16(cmd + 7);
2387 arr[0] = pcode;
2388 if (0 == subpcode) {
2389 switch (pcode) {
2390 case 0x0: /* Supported log pages log page */
2391 n = 4;
2392 arr[n++] = 0x0; /* this page */
2393 arr[n++] = 0xd; /* Temperature */
2394 arr[n++] = 0x2f; /* Informational exceptions */
2395 arr[3] = n - 4;
2396 break;
2397 case 0xd: /* Temperature log page */
2398 arr[3] = resp_temp_l_pg(arr + 4);
2399 break;
2400 case 0x2f: /* Informational exceptions log page */
2401 arr[3] = resp_ie_l_pg(arr + 4);
2402 break;
2403 default:
2404 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2405 return check_condition_result;
2406 }
2407 } else if (0xff == subpcode) {
2408 arr[0] |= 0x40;
2409 arr[1] = subpcode;
2410 switch (pcode) {
2411 case 0x0: /* Supported log pages and subpages log page */
2412 n = 4;
2413 arr[n++] = 0x0;
2414 arr[n++] = 0x0; /* 0,0 page */
2415 arr[n++] = 0x0;
2416 arr[n++] = 0xff; /* this page */
2417 arr[n++] = 0xd;
2418 arr[n++] = 0x0; /* Temperature */
2419 arr[n++] = 0x2f;
2420 arr[n++] = 0x0; /* Informational exceptions */
2421 arr[3] = n - 4;
2422 break;
2423 case 0xd: /* Temperature subpages */
2424 n = 4;
2425 arr[n++] = 0xd;
2426 arr[n++] = 0x0; /* Temperature */
2427 arr[3] = n - 4;
2428 break;
2429 case 0x2f: /* Informational exceptions subpages */
2430 n = 4;
2431 arr[n++] = 0x2f;
2432 arr[n++] = 0x0; /* Informational exceptions */
2433 arr[3] = n - 4;
2434 break;
2435 default:
2436 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2437 return check_condition_result;
2438 }
2439 } else {
2440 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2441 return check_condition_result;
2442 }
2443 len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2444 return fill_from_dev_buffer(scp, arr,
2445 min(len, SDEBUG_MAX_INQ_ARR_SZ));
2446}
2447
2448static int check_device_access_params(struct scsi_cmnd *scp,
2449 unsigned long long lba, unsigned int num)
2450{
2451 if (lba + num > sdebug_capacity) {
2452 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2453 return check_condition_result;
2454 }
2455 /* transfer length excessive (tie in to block limits VPD page) */
2456 if (num > sdebug_store_sectors) {
2457 /* needs work to find which cdb byte 'num' comes from */
2458 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2459 return check_condition_result;
2460 }
2461 return 0;
2462}
2463
/*
 * Copy data between the command's scatter-gather list and the fake store
 * for a read or write of 'num' sectors starting at 'lba', skipping the
 * first sg_skip bytes of the sgl. The store is smaller than the medium,
 * so the access may wrap around to the start of the store.
 * Returns number of bytes copied or -1 if error.
 */
static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
			    u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	struct scsi_data_buffer *sdb;
	enum dma_data_direction dir;

	/* pick the data buffer and expected direction for this transfer */
	if (do_write) {
		sdb = scsi_out(scmd);
		dir = DMA_TO_DEVICE;
	} else {
		sdb = scsi_in(scmd);
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length)
		return 0;
	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
		return -1;

	/* map the medium lba onto the (wrapping) fake store */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fake_storep + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* wrapped: copy the remainder from/to the store's start */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fake_storep, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
2505
2506/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2507 * arr into fake_store(lba,num) and return true. If comparison fails then
2508 * return false. */
2509static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2510{
2511 bool res;
2512 u64 block, rest = 0;
2513 u32 store_blks = sdebug_store_sectors;
2514 u32 lb_size = sdebug_sector_size;
2515
2516 block = do_div(lba, store_blks);
2517 if (block + num > store_blks)
2518 rest = block + num - store_blks;
2519
2520 res = !memcmp(fake_storep + (block * lb_size), arr,
2521 (num - rest) * lb_size);
2522 if (!res)
2523 return res;
2524 if (rest)
2525 res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2526 rest * lb_size);
2527 if (!res)
2528 return res;
2529 arr += num * lb_size;
2530 memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2531 if (rest)
2532 memcpy(fake_storep, arr + ((num - rest) * lb_size),
2533 rest * lb_size);
2534 return res;
2535}
2536
2537static __be16 dif_compute_csum(const void *buf, int len)
2538{
2539 __be16 csum;
2540
2541 if (sdebug_guard)
2542 csum = (__force __be16)ip_compute_csum(buf, len);
2543 else
2544 csum = cpu_to_be16(crc_t10dif(buf, len));
2545
2546 return csum;
2547}
2548
2549static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2550 sector_t sector, u32 ei_lba)
2551{
2552 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
2553
2554 if (sdt->guard_tag != csum) {
2555 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2556 (unsigned long)sector,
2557 be16_to_cpu(sdt->guard_tag),
2558 be16_to_cpu(csum));
2559 return 0x01;
2560 }
2561 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2562 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2563 pr_err("REF check failed on sector %lu\n",
2564 (unsigned long)sector);
2565 return 0x03;
2566 }
2567 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2568 be32_to_cpu(sdt->ref_tag) != ei_lba) {
2569 pr_err("REF check failed on sector %lu\n",
2570 (unsigned long)sector);
2571 return 0x03;
2572 }
2573 return 0;
2574}
2575
/*
 * Copy protection information tuples between the command's protection
 * scatter-gather list and dif_storep, in the direction given by 'read'.
 * Handles wrap-around at the end of the (circular) protection store.
 */
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
		       scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;

		/* 'rest' is the part that wraps past the end of the store */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped portion continues at the store's start */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2618
2619static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2620 unsigned int sectors, u32 ei_lba)
2621{
2622 unsigned int i;
2623 struct t10_pi_tuple *sdt;
2624 sector_t sector;
2625
2626 for (i = 0; i < sectors; i++, ei_lba++) {
2627 int ret;
2628
2629 sector = start_sec + i;
2630 sdt = dif_store(sector);
2631
2632 if (sdt->app_tag == cpu_to_be16(0xffff))
2633 continue;
2634
2635 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2636 if (ret) {
2637 dif_errors++;
2638 return ret;
2639 }
2640 }
2641
2642 dif_copy_prot(SCpnt, start_sec, sectors, true);
2643 dix_reads++;
2644
2645 return 0;
2646}
2647
/*
 * Respond to the READ family (READ 6/10/12/16, XDWRITEREAD 10 and, by
 * default, READ 32): decode lba/num from the cdb, run protection and
 * error-injection checks, then copy data from the fake store into the
 * command's buffer under the atomic_rw read lock.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;
	u64 lba;
	u32 num;
	u32 ei_lba;
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* decode LBA and transfer length per cdb variant */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* READ(6) transfer length of 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	/* validate RDPROTECT (cmd[1] bits 5-7) against the DIF type */
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* error injection: "short read" halves the transfer length */
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_short)
				num /= 2;
		}
	} else
		sqcp = NULL;

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* optional injected medium error over a configured LBA range */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, 0, lba, num, false);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;

	/* post-transfer error injection, per queued command flags */
	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2788
/*
 * Pretty-print one sector (or any short buffer) for DIF debugging.
 * Printable ASCII bytes are shown as " c ", everything else as hex,
 * 16 bytes per pr_err() line prefixed with the decimal offset.
 */
static void dump_sector(unsigned char *buf, int len)
{
	int off, k, pos;

	pr_err(">>> Sector Dump <<<\n");
	for (off = 0; off < len; off += 16) {
		char line[128];

		pos = 0;
		for (k = 0; k < 16; k++) {
			unsigned char c = buf[off + k];

			if (c >= 0x20 && c < 0x7e)
				pos += scnprintf(line + pos,
						 sizeof(line) - pos,
						 " %c ", c);
			else
				pos += scnprintf(line + pos,
						 sizeof(line) - pos,
						 "%02x ", c);
		}
		pr_err("%04d: %s\n", off, line);
	}
}
2810
/*
 * Verify the T10 PI (DIF) tuples accompanying a protected WRITE against
 * the data-out payload, walking the protection and data scatter-gather
 * lists in lockstep, one sector at a time.  On success the protection
 * data is saved via dif_copy_prot() and 0 is returned; on failure the
 * offending sector is dumped and the non-zero code from dif_verify()
 * (a sense-key-specific qualifier) is returned.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* byte offset into current protection page */
	int dpage_offset;	/* byte offset into current data page */
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	/* atomic (kmap_atomic) iteration over both sg lists */
	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
		       scsi_prot_sg_count(SCpnt),
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			/* data sg list ran out before protection list */
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		/* tell the data iterator how much of its page we consumed */
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* all tuples verified: persist the protection information */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
2882
2883static unsigned long lba_to_map_index(sector_t lba)
2884{
2885 if (sdebug_unmap_alignment)
2886 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2887 sector_div(lba, sdebug_unmap_granularity);
2888 return lba;
2889}
2890
2891static sector_t map_index_to_lba(unsigned long index)
2892{
2893 sector_t lba = index * sdebug_unmap_granularity;
2894
2895 if (sdebug_unmap_alignment)
2896 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2897 return lba;
2898}
2899
2900static unsigned int map_state(sector_t lba, unsigned int *num)
2901{
2902 sector_t end;
2903 unsigned int mapped;
2904 unsigned long index;
2905 unsigned long next;
2906
2907 index = lba_to_map_index(lba);
2908 mapped = test_bit(index, map_storep);
2909
2910 if (mapped)
2911 next = find_next_zero_bit(map_storep, map_size, index);
2912 else
2913 next = find_next_bit(map_storep, map_size, index);
2914
2915 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
2916 *num = end - lba;
2917 return mapped;
2918}
2919
2920static void map_region(sector_t lba, unsigned int len)
2921{
2922 sector_t end = lba + len;
2923
2924 while (lba < end) {
2925 unsigned long index = lba_to_map_index(lba);
2926
2927 if (index < map_size)
2928 set_bit(index, map_storep);
2929
2930 lba = map_index_to_lba(index + 1);
2931 }
2932}
2933
2934static void unmap_region(sector_t lba, unsigned int len)
2935{
2936 sector_t end = lba + len;
2937
2938 while (lba < end) {
2939 unsigned long index = lba_to_map_index(lba);
2940
2941 if (lba == map_index_to_lba(index) &&
2942 lba + sdebug_unmap_granularity <= end &&
2943 index < map_size) {
2944 clear_bit(index, map_storep);
2945 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
2946 memset(fake_storep +
2947 lba * sdebug_sector_size,
2948 (sdebug_lbprz & 1) ? 0 : 0xff,
2949 sdebug_sector_size *
2950 sdebug_unmap_granularity);
2951 }
2952 if (dif_storep) {
2953 memset(dif_storep + lba, 0xff,
2954 sizeof(*dif_storep) *
2955 sdebug_unmap_granularity);
2956 }
2957 }
2958 lba = map_index_to_lba(index + 1);
2959 }
2960}
2961
/*
 * Respond to WRITE(6), WRITE(10), WRITE(12), WRITE(16), WRITE(32) and
 * the write leg of XDWRITEREAD(10).  Decodes LBA/length from the cdb,
 * validates protection bits and the access range, then copies the
 * data-out payload into the fake store under the atomic_rw write lock.
 * Returns 0, a check/illegal condition result, or DID_ERROR << 16.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;	/* expected initial LBA, non-zero only for WRITE(32) */
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* per-opcode cdb decode */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
			(u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* WRPROTECT bits are illegal on a type 2 protected device */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, 0, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(lba, num);	/* written blocks become provisioned */
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* optional error injection (set via the 'opts' module parameter) */
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_queued_cmd *sqcp =
			(struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_recovered) {
				mk_sense_buffer(scp, RECOVERED_ERROR,
						THRESHOLD_EXCEEDED, 0);
				return check_condition_result;
			} else if (sqcp->inj_dif) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return illegal_condition_result;
			} else if (sqcp->inj_dix) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
		}
	}
	return 0;
}
3082
3083/*
3084 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3085 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3086 */
3087static int resp_write_scat(struct scsi_cmnd *scp,
3088 struct sdebug_dev_info *devip)
3089{
3090 u8 *cmd = scp->cmnd;
3091 u8 *lrdp = NULL;
3092 u8 *up;
3093 u8 wrprotect;
3094 u16 lbdof, num_lrd, k;
3095 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3096 u32 lb_size = sdebug_sector_size;
3097 u32 ei_lba;
3098 u64 lba;
3099 unsigned long iflags;
3100 int ret, res;
3101 bool is_16;
3102 static const u32 lrd_size = 32; /* + parameter list header size */
3103
3104 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3105 is_16 = false;
3106 wrprotect = (cmd[10] >> 5) & 0x7;
3107 lbdof = get_unaligned_be16(cmd + 12);
3108 num_lrd = get_unaligned_be16(cmd + 16);
3109 bt_len = get_unaligned_be32(cmd + 28);
3110 } else { /* that leaves WRITE SCATTERED(16) */
3111 is_16 = true;
3112 wrprotect = (cmd[2] >> 5) & 0x7;
3113 lbdof = get_unaligned_be16(cmd + 4);
3114 num_lrd = get_unaligned_be16(cmd + 8);
3115 bt_len = get_unaligned_be32(cmd + 10);
3116 if (unlikely(have_dif_prot)) {
3117 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3118 wrprotect) {
3119 mk_sense_invalid_opcode(scp);
3120 return illegal_condition_result;
3121 }
3122 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3123 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3124 wrprotect == 0)
3125 sdev_printk(KERN_ERR, scp->device,
3126 "Unprotected WR to DIF device\n");
3127 }
3128 }
3129 if ((num_lrd == 0) || (bt_len == 0))
3130 return 0; /* T10 says these do-nothings are not errors */
3131 if (lbdof == 0) {
3132 if (sdebug_verbose)
3133 sdev_printk(KERN_INFO, scp->device,
3134 "%s: %s: LB Data Offset field bad\n",
3135 my_name, __func__);
3136 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3137 return illegal_condition_result;
3138 }
3139 lbdof_blen = lbdof * lb_size;
3140 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3141 if (sdebug_verbose)
3142 sdev_printk(KERN_INFO, scp->device,
3143 "%s: %s: LBA range descriptors don't fit\n",
3144 my_name, __func__);
3145 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3146 return illegal_condition_result;
3147 }
3148 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3149 if (lrdp == NULL)
3150 return SCSI_MLQUEUE_HOST_BUSY;
3151 if (sdebug_verbose)
3152 sdev_printk(KERN_INFO, scp->device,
3153 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3154 my_name, __func__, lbdof_blen);
3155 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3156 if (res == -1) {
3157 ret = DID_ERROR << 16;
3158 goto err_out;
3159 }
3160
3161 write_lock_irqsave(&atomic_rw, iflags);
3162 sg_off = lbdof_blen;
3163 /* Spec says Buffer xfer Length field in number of LBs in dout */
3164 cum_lb = 0;
3165 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3166 lba = get_unaligned_be64(up + 0);
3167 num = get_unaligned_be32(up + 8);
3168 if (sdebug_verbose)
3169 sdev_printk(KERN_INFO, scp->device,
3170 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3171 my_name, __func__, k, lba, num, sg_off);
3172 if (num == 0)
3173 continue;
3174 ret = check_device_access_params(scp, lba, num);
3175 if (ret)
3176 goto err_out_unlock;
3177 num_by = num * lb_size;
3178 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3179
3180 if ((cum_lb + num) > bt_len) {
3181 if (sdebug_verbose)
3182 sdev_printk(KERN_INFO, scp->device,
3183 "%s: %s: sum of blocks > data provided\n",
3184 my_name, __func__);
3185 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3186 0);
3187 ret = illegal_condition_result;
3188 goto err_out_unlock;
3189 }
3190
3191 /* DIX + T10 DIF */
3192 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3193 int prot_ret = prot_verify_write(scp, lba, num,
3194 ei_lba);
3195
3196 if (prot_ret) {
3197 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3198 prot_ret);
3199 ret = illegal_condition_result;
3200 goto err_out_unlock;
3201 }
3202 }
3203
3204 ret = do_device_access(scp, sg_off, lba, num, true);
3205 if (unlikely(scsi_debug_lbp()))
3206 map_region(lba, num);
3207 if (unlikely(-1 == ret)) {
3208 ret = DID_ERROR << 16;
3209 goto err_out_unlock;
3210 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3211 sdev_printk(KERN_INFO, scp->device,
3212 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3213 my_name, num_by, ret);
3214
3215 if (unlikely(sdebug_any_injecting_opt)) {
3216 struct sdebug_queued_cmd *sqcp =
3217 (struct sdebug_queued_cmd *)scp->host_scribble;
3218
3219 if (sqcp) {
3220 if (sqcp->inj_recovered) {
3221 mk_sense_buffer(scp, RECOVERED_ERROR,
3222 THRESHOLD_EXCEEDED, 0);
3223 ret = illegal_condition_result;
3224 goto err_out_unlock;
3225 } else if (sqcp->inj_dif) {
3226 /* Logical block guard check failed */
3227 mk_sense_buffer(scp, ABORTED_COMMAND,
3228 0x10, 1);
3229 ret = illegal_condition_result;
3230 goto err_out_unlock;
3231 } else if (sqcp->inj_dix) {
3232 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3233 0x10, 1);
3234 ret = illegal_condition_result;
3235 goto err_out_unlock;
3236 }
3237 }
3238 }
3239 sg_off += num_by;
3240 cum_lb += num;
3241 }
3242 ret = 0;
3243err_out_unlock:
3244 write_unlock_irqrestore(&atomic_rw, iflags);
3245err_out:
3246 kfree(lrdp);
3247 return ret;
3248}
3249
/*
 * Core of the WRITE SAME(10/16) implementation.  With the UNMAP bit set
 * and logical block provisioning enabled the range is de-provisioned;
 * otherwise one logical block is obtained (all zeroes when 'ndob' is
 * set, else fetched from the data-out buffer) and replicated across
 * 'num' blocks.  All store mutation happens under the atomic_rw lock.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;
	u64 lba_off;	/* byte offset of 'lba' within fake_storep */

	ret = check_device_access_params(scp, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	if (unmap && scsi_debug_lbp()) {
		unmap_region(lba, num);
		goto out;
	}

	lba_off = lba * sdebug_sector_size;
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	if (ndob) {
		memset(fake_storep + lba_off, 0, sdebug_sector_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
					  sdebug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same",
			    sdebug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
		       fake_storep + lba_off,
		       sdebug_sector_size);

	if (scsi_debug_lbp())
		map_region(lba, num);
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
3300
3301static int resp_write_same_10(struct scsi_cmnd *scp,
3302 struct sdebug_dev_info *devip)
3303{
3304 u8 *cmd = scp->cmnd;
3305 u32 lba;
3306 u16 num;
3307 u32 ei_lba = 0;
3308 bool unmap = false;
3309
3310 if (cmd[1] & 0x8) {
3311 if (sdebug_lbpws10 == 0) {
3312 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3313 return check_condition_result;
3314 } else
3315 unmap = true;
3316 }
3317 lba = get_unaligned_be32(cmd + 2);
3318 num = get_unaligned_be16(cmd + 7);
3319 if (num > sdebug_write_same_length) {
3320 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3321 return check_condition_result;
3322 }
3323 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3324}
3325
3326static int resp_write_same_16(struct scsi_cmnd *scp,
3327 struct sdebug_dev_info *devip)
3328{
3329 u8 *cmd = scp->cmnd;
3330 u64 lba;
3331 u32 num;
3332 u32 ei_lba = 0;
3333 bool unmap = false;
3334 bool ndob = false;
3335
3336 if (cmd[1] & 0x8) { /* UNMAP */
3337 if (sdebug_lbpws == 0) {
3338 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3339 return check_condition_result;
3340 } else
3341 unmap = true;
3342 }
3343 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3344 ndob = true;
3345 lba = get_unaligned_be64(cmd + 2);
3346 num = get_unaligned_be32(cmd + 10);
3347 if (num > sdebug_write_same_length) {
3348 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3349 return check_condition_result;
3350 }
3351 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3352}
3353
3354/* Note the mode field is in the same position as the (lower) service action
3355 * field. For the Report supported operation codes command, SPC-4 suggests
3356 * each mode of this command should be reported separately; for future. */
3357static int resp_write_buffer(struct scsi_cmnd *scp,
3358 struct sdebug_dev_info *devip)
3359{
3360 u8 *cmd = scp->cmnd;
3361 struct scsi_device *sdp = scp->device;
3362 struct sdebug_dev_info *dp;
3363 u8 mode;
3364
3365 mode = cmd[1] & 0x1f;
3366 switch (mode) {
3367 case 0x4: /* download microcode (MC) and activate (ACT) */
3368 /* set UAs on this device only */
3369 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3370 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3371 break;
3372 case 0x5: /* download MC, save and ACT */
3373 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3374 break;
3375 case 0x6: /* download MC with offsets and ACT */
3376 /* set UAs on most devices (LUs) in this target */
3377 list_for_each_entry(dp,
3378 &devip->sdbg_host->dev_info_list,
3379 dev_list)
3380 if (dp->target == sdp->id) {
3381 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3382 if (devip != dp)
3383 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3384 dp->uas_bm);
3385 }
3386 break;
3387 case 0x7: /* download MC with offsets, save, and ACT */
3388 /* set UA on all devices (LUs) in this target */
3389 list_for_each_entry(dp,
3390 &devip->sdbg_host->dev_info_list,
3391 dev_list)
3392 if (dp->target == sdp->id)
3393 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3394 dp->uas_bm);
3395 break;
3396 default:
3397 /* do nothing for this command for other mode values */
3398 break;
3399 }
3400 return 0;
3401}
3402
/*
 * COMPARE AND WRITE (opcode 0x89).  The data-out buffer carries
 * 2*num blocks: the first half is compared against the current store
 * contents and, only if every block matches, the second half is
 * written.  The whole operation runs under the atomic_rw write lock;
 * a mismatch produces MISCOMPARE sense.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	u8 *fake_storep_hold;	/* saved global while arr is swapped in */
	u64 lba;
	u32 dnum;	/* number of blocks in data-out: 2 * num */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	unsigned long iflags;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];	/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	dnum = 2 * num;
	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* trick do_device_access() to fetch both compare and write buffers
	 * from data-in into arr. Safe (atomic) since write_lock held. */
	fake_storep_hold = fake_storep;
	fake_storep = arr;
	ret = do_device_access(scp, 0, 0, dnum, true);
	fake_storep = fake_storep_hold;
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(lba, num, arr)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(lba, num);	/* successful write provisions range */
cleanup:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(arr);
	return retval;
}
3478
/* One 16 byte UNMAP block descriptor from the parameter data (SBC-3). */
struct unmap_block_desc {
	__be64 lba;	/* first LBA of the extent to unmap */
	__be32 blocks;	/* number of logical blocks in the extent */
	__be32 __reserved;
};
3484
3485static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3486{
3487 unsigned char *buf;
3488 struct unmap_block_desc *desc;
3489 unsigned int i, payload_len, descriptors;
3490 int ret;
3491 unsigned long iflags;
3492
3493
3494 if (!scsi_debug_lbp())
3495 return 0; /* fib and say its done */
3496 payload_len = get_unaligned_be16(scp->cmnd + 7);
3497 BUG_ON(scsi_bufflen(scp) != payload_len);
3498
3499 descriptors = (payload_len - 8) / 16;
3500 if (descriptors > sdebug_unmap_max_desc) {
3501 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3502 return check_condition_result;
3503 }
3504
3505 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3506 if (!buf) {
3507 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3508 INSUFF_RES_ASCQ);
3509 return check_condition_result;
3510 }
3511
3512 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3513
3514 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3515 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3516
3517 desc = (void *)&buf[8];
3518
3519 write_lock_irqsave(&atomic_rw, iflags);
3520
3521 for (i = 0 ; i < descriptors ; i++) {
3522 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3523 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3524
3525 ret = check_device_access_params(scp, lba, num);
3526 if (ret)
3527 goto out;
3528
3529 unmap_region(lba, num);
3530 }
3531
3532 ret = 0;
3533
3534out:
3535 write_unlock_irqrestore(&atomic_rw, iflags);
3536 kfree(buf);
3537
3538 return ret;
3539}
3540
3541#define SDEBUG_GET_LBA_STATUS_LEN 32
3542
3543static int resp_get_lba_status(struct scsi_cmnd *scp,
3544 struct sdebug_dev_info *devip)
3545{
3546 u8 *cmd = scp->cmnd;
3547 u64 lba;
3548 u32 alloc_len, mapped, num;
3549 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3550 int ret;
3551
3552 lba = get_unaligned_be64(cmd + 2);
3553 alloc_len = get_unaligned_be32(cmd + 10);
3554
3555 if (alloc_len < 24)
3556 return 0;
3557
3558 ret = check_device_access_params(scp, lba, 1);
3559 if (ret)
3560 return ret;
3561
3562 if (scsi_debug_lbp())
3563 mapped = map_state(lba, &num);
3564 else {
3565 mapped = 1;
3566 /* following just in case virtual_gb changed */
3567 sdebug_capacity = get_sdebug_capacity();
3568 if (sdebug_capacity - lba <= 0xffffffff)
3569 num = sdebug_capacity - lba;
3570 else
3571 num = 0xffffffff;
3572 }
3573
3574 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3575 put_unaligned_be32(20, arr); /* Parameter Data Length */
3576 put_unaligned_be64(lba, arr + 8); /* LBA */
3577 put_unaligned_be32(num, arr + 16); /* Number of blocks */
3578 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
3579
3580 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3581}
3582
3583static int resp_sync_cache(struct scsi_cmnd *scp,
3584 struct sdebug_dev_info *devip)
3585{
3586 u64 lba;
3587 u32 num_blocks;
3588 u8 *cmd = scp->cmnd;
3589
3590 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
3591 lba = get_unaligned_be32(cmd + 2);
3592 num_blocks = get_unaligned_be16(cmd + 7);
3593 } else { /* SYNCHRONIZE_CACHE(16) */
3594 lba = get_unaligned_be64(cmd + 2);
3595 num_blocks = get_unaligned_be32(cmd + 10);
3596 }
3597 if (lba + num_blocks > sdebug_capacity) {
3598 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3599 return check_condition_result;
3600 }
3601 return (cmd[1] & 0x2) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */
3602}
3603
3604#define RL_BUCKET_ELEMS 8
3605
3606/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3607 * (W-LUN), the normal Linux scanning logic does not associate it with a
3608 * device (e.g. /dev/sg7). The following magic will make that association:
3609 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3610 * where <n> is a host number. If there are multiple targets in a host then
3611 * the above will associate a W-LUN to each target. To only get a W-LUN
3612 * for target 2, then use "echo '- 2 49409' > scan" .
3613 */
/*
 * REPORT LUNS.  Builds the LUN list in buckets of RL_BUCKET_ELEMS
 * 8 byte entries; the 8 byte response header occupies the first
 * entry's slot of the first bucket (the loops rely on the two sizes
 * matching).  Each full bucket is streamed to the data-in buffer at
 * offset off_rsp via p_fill_from_dev_buffer().
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	/* with no_lun_0 the numbering starts at 1, so one fewer LUN */
	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket: header takes the first entry slot */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		if (j < RL_BUCKET_ELEMS)	/* bucket not filled: done */
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		/* append the REPORT LUNS well known LU to the last bucket */
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
3704
/*
 * XOR helper for XDWRITEREAD(10), run after the read/write legs: the
 * data-out payload is copied into a temporary buffer, then every byte
 * of the (bidi) data-in buffer is XORed in place with the matching
 * temporary byte.  Returns 0, or check_condition_result if the
 * temporary buffer cannot be allocated.
 */
static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
			    unsigned int num, struct sdebug_dev_info *devip)
{
	int j;
	unsigned char *kaddr, *buf;
	unsigned int offset;	/* running offset into buf across sg pages */
	struct scsi_data_buffer *sdb = scsi_in(scp);
	struct sg_mapping_iter miter;

	/* better not to use temporary buffer. */
	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	offset = 0;
	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
			SG_MITER_ATOMIC | SG_MITER_TO_SG);

	/* XOR each mapped data-in page with the copied data-out bytes */
	while (sg_miter_next(&miter)) {
		kaddr = miter.addr;
		for (j = 0; j < miter.length; j++)
			*(kaddr + j) ^= *(buf + offset + j);

		offset += miter.length;
	}
	sg_miter_stop(&miter);
	kfree(buf);

	return 0;
}
3740
3741static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3742 struct sdebug_dev_info *devip)
3743{
3744 u8 *cmd = scp->cmnd;
3745 u64 lba;
3746 u32 num;
3747 int errsts;
3748
3749 if (!scsi_bidi_cmnd(scp)) {
3750 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3751 INSUFF_RES_ASCQ);
3752 return check_condition_result;
3753 }
3754 errsts = resp_read_dt0(scp, devip);
3755 if (errsts)
3756 return errsts;
3757 if (!(cmd[1] & 0x4)) { /* DISABLE_WRITE is not set */
3758 errsts = resp_write_dt0(scp, devip);
3759 if (errsts)
3760 return errsts;
3761 }
3762 lba = get_unaligned_be32(cmd + 2);
3763 num = get_unaligned_be16(cmd + 7);
3764 return resp_xdwriteread(scp, lba, num, devip);
3765}
3766
3767static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3768{
3769 u32 tag = blk_mq_unique_tag(cmnd->request);
3770 u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3771
3772 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
3773 if (WARN_ON_ONCE(hwq >= submit_queues))
3774 hwq = 0;
3775 return sdebug_q_arr + hwq;
3776}
3777
/* Queued (deferred) command completions converge here: called from both
 * the hrtimer and workqueue completion paths.  Validates the queued
 * command slot under the queue spinlock, releases it (handling a
 * user-shrunk max_queue), then invokes the mid-level done callback.
 */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	sd_dp->defer_t = SDEB_DEFER_NONE;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* count completions migrated off the issuing cpu */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		/* command was aborted/raced away before completion fired */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* shrink retired_max_queue toward the highest busy slot */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	scp->scsi_done(scp); /* callback to mid level */
}
3843
3844/* When high resolution timer goes off this function is called. */
3845static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3846{
3847 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3848 hrt);
3849 sdebug_q_cmd_complete(sd_dp);
3850 return HRTIMER_NORESTART;
3851}
3852
/* Work queue callback: runs when the deferred-response work item is
 * scheduled; completes the associated queued command. */
static void sdebug_q_cmd_wq_complete(struct work_struct *work)
{
	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
						  ew.work);
	sdebug_q_cmd_complete(sd_dp);
}
3860
/* Shared LU name used when sdebug_uuid_ctl == 2: generated once by the
 * first device created (see sdebug_device_create()) and reused by all
 * subsequent devices. */
static bool got_shared_uuid;
static uuid_t shared_uuid;
3863
3864static struct sdebug_dev_info *sdebug_device_create(
3865 struct sdebug_host_info *sdbg_host, gfp_t flags)
3866{
3867 struct sdebug_dev_info *devip;
3868
3869 devip = kzalloc(sizeof(*devip), flags);
3870 if (devip) {
3871 if (sdebug_uuid_ctl == 1)
3872 uuid_gen(&devip->lu_name);
3873 else if (sdebug_uuid_ctl == 2) {
3874 if (got_shared_uuid)
3875 devip->lu_name = shared_uuid;
3876 else {
3877 uuid_gen(&shared_uuid);
3878 got_shared_uuid = true;
3879 devip->lu_name = shared_uuid;
3880 }
3881 }
3882 devip->sdbg_host = sdbg_host;
3883 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3884 }
3885 return devip;
3886}
3887
3888static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3889{
3890 struct sdebug_host_info *sdbg_host;
3891 struct sdebug_dev_info *open_devip = NULL;
3892 struct sdebug_dev_info *devip;
3893
3894 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3895 if (!sdbg_host) {
3896 pr_err("Host info NULL\n");
3897 return NULL;
3898 }
3899 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3900 if ((devip->used) && (devip->channel == sdev->channel) &&
3901 (devip->target == sdev->id) &&
3902 (devip->lun == sdev->lun))
3903 return devip;
3904 else {
3905 if ((!devip->used) && (!open_devip))
3906 open_devip = devip;
3907 }
3908 }
3909 if (!open_devip) { /* try and make a new one */
3910 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3911 if (!open_devip) {
3912 pr_err("out of memory at line %d\n", __LINE__);
3913 return NULL;
3914 }
3915 }
3916
3917 open_devip->channel = sdev->channel;
3918 open_devip->target = sdev->id;
3919 open_devip->lun = sdev->lun;
3920 open_devip->sdbg_host = sdbg_host;
3921 atomic_set(&open_devip->num_in_q, 0);
3922 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3923 open_devip->used = true;
3924 return open_devip;
3925}
3926
/* SCSI host template slave_alloc hook: just logs (when verbose) and marks
 * the request queue as supporting bidirectional commands. */
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (sdebug_verbose)
		pr_info("slave_alloc <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	blk_queue_flag_set(QUEUE_FLAG_BIDI, sdp->request_queue);
	return 0;
}
3935
3936static int scsi_debug_slave_configure(struct scsi_device *sdp)
3937{
3938 struct sdebug_dev_info *devip =
3939 (struct sdebug_dev_info *)sdp->hostdata;
3940
3941 if (sdebug_verbose)
3942 pr_info("slave_configure <%u %u %u %llu>\n",
3943 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3944 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3945 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3946 if (devip == NULL) {
3947 devip = find_build_dev_info(sdp);
3948 if (devip == NULL)
3949 return 1; /* no resources, will be marked offline */
3950 }
3951 sdp->hostdata = devip;
3952 blk_queue_max_segment_size(sdp->request_queue, -1U);
3953 if (sdebug_no_uld)
3954 sdp->no_uld_attach = 1;
3955 config_cdb_len(sdp);
3956 return 0;
3957}
3958
3959static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3960{
3961 struct sdebug_dev_info *devip =
3962 (struct sdebug_dev_info *)sdp->hostdata;
3963
3964 if (sdebug_verbose)
3965 pr_info("slave_destroy <%u %u %u %llu>\n",
3966 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3967 if (devip) {
3968 /* make this slot available for re-use */
3969 devip->used = false;
3970 sdp->hostdata = NULL;
3971 }
3972}
3973
3974static void stop_qc_helper(struct sdebug_defer *sd_dp,
3975 enum sdeb_defer_type defer_t)
3976{
3977 if (!sd_dp)
3978 return;
3979 if (defer_t == SDEB_DEFER_HRT)
3980 hrtimer_cancel(&sd_dp->hrt);
3981 else if (defer_t == SDEB_DEFER_WQ)
3982 cancel_work_sync(&sd_dp->ew.work);
3983}
3984
/* If @cmnd is found on any submit queue, cancels its timer or work item
 * and returns true; else returns false. NOTE(review): the qc_lock is
 * dropped before stop_qc_helper() is called, presumably because
 * cancel_work_sync() may block — confirm against locking rules. */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		/* scan up to the larger of the live and retired depths */
		qmax = sdebug_max_queue;
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
					cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot + clear defer_t under the lock so
				 * the completion path sees SDEB_DEFER_NONE */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
4030
/* Deletes (stops) timers or work queues of all queued commands, across
 * every submit queue. Like stop_queued_cmnd() the qc_lock is dropped
 * around stop_qc_helper() and re-taken to continue the scan. */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* drop lock while cancelling; helper may block */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
4069
4070/* Free queued command memory on heap */
4071static void free_all_queued(void)
4072{
4073 int j, k;
4074 struct sdebug_queue *sqp;
4075 struct sdebug_queued_cmd *sqcp;
4076
4077 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4078 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4079 sqcp = &sqp->qc_arr[k];
4080 kfree(sqcp->sd_dp);
4081 sqcp->sd_dp = NULL;
4082 }
4083 }
4084}
4085
4086static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
4087{
4088 bool ok;
4089
4090 ++num_aborts;
4091 if (SCpnt) {
4092 ok = stop_queued_cmnd(SCpnt);
4093 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4094 sdev_printk(KERN_INFO, SCpnt->device,
4095 "%s: command%s found\n", __func__,
4096 ok ? "" : " not");
4097 }
4098 return SUCCESS;
4099}
4100
4101static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
4102{
4103 ++num_dev_resets;
4104 if (SCpnt && SCpnt->device) {
4105 struct scsi_device *sdp = SCpnt->device;
4106 struct sdebug_dev_info *devip =
4107 (struct sdebug_dev_info *)sdp->hostdata;
4108
4109 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4110 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4111 if (devip)
4112 set_bit(SDEBUG_UA_POR, devip->uas_bm);
4113 }
4114 return SUCCESS;
4115}
4116
4117static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
4118{
4119 struct sdebug_host_info *sdbg_host;
4120 struct sdebug_dev_info *devip;
4121 struct scsi_device *sdp;
4122 struct Scsi_Host *hp;
4123 int k = 0;
4124
4125 ++num_target_resets;
4126 if (!SCpnt)
4127 goto lie;
4128 sdp = SCpnt->device;
4129 if (!sdp)
4130 goto lie;
4131 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4132 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4133 hp = sdp->host;
4134 if (!hp)
4135 goto lie;
4136 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4137 if (sdbg_host) {
4138 list_for_each_entry(devip,
4139 &sdbg_host->dev_info_list,
4140 dev_list)
4141 if (devip->target == sdp->id) {
4142 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4143 ++k;
4144 }
4145 }
4146 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4147 sdev_printk(KERN_INFO, sdp,
4148 "%s: %d device(s) found in target\n", __func__, k);
4149lie:
4150 return SUCCESS;
4151}
4152
4153static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
4154{
4155 struct sdebug_host_info *sdbg_host;
4156 struct sdebug_dev_info *devip;
4157 struct scsi_device *sdp;
4158 struct Scsi_Host *hp;
4159 int k = 0;
4160
4161 ++num_bus_resets;
4162 if (!(SCpnt && SCpnt->device))
4163 goto lie;
4164 sdp = SCpnt->device;
4165 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4166 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4167 hp = sdp->host;
4168 if (hp) {
4169 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4170 if (sdbg_host) {
4171 list_for_each_entry(devip,
4172 &sdbg_host->dev_info_list,
4173 dev_list) {
4174 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4175 ++k;
4176 }
4177 }
4178 }
4179 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4180 sdev_printk(KERN_INFO, sdp,
4181 "%s: %d device(s) found in host\n", __func__, k);
4182lie:
4183 return SUCCESS;
4184}
4185
4186static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
4187{
4188 struct sdebug_host_info *sdbg_host;
4189 struct sdebug_dev_info *devip;
4190 int k = 0;
4191
4192 ++num_host_resets;
4193 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4194 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
4195 spin_lock(&sdebug_host_list_lock);
4196 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
4197 list_for_each_entry(devip, &sdbg_host->dev_info_list,
4198 dev_list) {
4199 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4200 ++k;
4201 }
4202 }
4203 spin_unlock(&sdebug_host_list_lock);
4204 stop_all_queued();
4205 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4206 sdev_printk(KERN_INFO, SCpnt->device,
4207 "%s: %d device(s) found\n", __func__, k);
4208 return SUCCESS;
4209}
4210
/* Write a classic MBR partition table (boot signature + up to
 * SDEBUG_MAX_PARTS entries with CHS and LBA fields) into the first sector
 * of the simulated store @ramp. Partition boundaries are rounded to
 * cylinder multiples (heads * sectors-per-track). No-op for stores under
 * 1 MiB or when no partitions were requested. */
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* first track is reserved for the partition table itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;	/* cylinder aligned */
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* 0 terminates the loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
4260
4261static void block_unblock_all_queues(bool block)
4262{
4263 int j;
4264 struct sdebug_queue *sqp;
4265
4266 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
4267 atomic_set(&sqp->blocked, (int)block);
4268}
4269
4270/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4271 * commands will be processed normally before triggers occur.
4272 */
4273static void tweak_cmnd_count(void)
4274{
4275 int count, modulo;
4276
4277 modulo = abs(sdebug_every_nth);
4278 if (modulo < 2)
4279 return;
4280 block_unblock_all_queues(true);
4281 count = atomic_read(&sdebug_cmnd_count);
4282 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4283 block_unblock_all_queues(false);
4284}
4285
/* Reset the global command/completion statistics counters to zero. */
static void clear_queue_stats(void)
{
	atomic_set(&sdebug_cmnd_count, 0);
	atomic_set(&sdebug_completions, 0);
	atomic_set(&sdebug_miss_cpus, 0);
	atomic_set(&sdebug_a_tsf, 0);
}
4293
4294static void setup_inject(struct sdebug_queue *sqp,
4295 struct sdebug_queued_cmd *sqcp)
4296{
4297 if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
4298 if (sdebug_every_nth > 0)
4299 sqcp->inj_recovered = sqcp->inj_transport
4300 = sqcp->inj_dif
4301 = sqcp->inj_dix = sqcp->inj_short = 0;
4302 return;
4303 }
4304 sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4305 sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4306 sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4307 sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4308 sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4309 sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
4310}
4311
4312/* Complete the processing of the thread that queued a SCSI command to this
4313 * driver. It either completes the command by calling cmnd_done() or
4314 * schedules a hr timer or work queue then returns 0. Returns
4315 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
4316 */
4317static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4318 int scsi_result,
4319 int (*pfp)(struct scsi_cmnd *,
4320 struct sdebug_dev_info *),
4321 int delta_jiff, int ndelay)
4322{
4323 unsigned long iflags;
4324 int k, num_in_q, qdepth, inject;
4325 struct sdebug_queue *sqp;
4326 struct sdebug_queued_cmd *sqcp;
4327 struct scsi_device *sdp;
4328 struct sdebug_defer *sd_dp;
4329
4330 if (unlikely(devip == NULL)) {
4331 if (scsi_result == 0)
4332 scsi_result = DID_NO_CONNECT << 16;
4333 goto respond_in_thread;
4334 }
4335 sdp = cmnd->device;
4336
4337 if (delta_jiff == 0)
4338 goto respond_in_thread;
4339
4340 /* schedule the response at a later time if resources permit */
4341 sqp = get_queue(cmnd);
4342 spin_lock_irqsave(&sqp->qc_lock, iflags);
4343 if (unlikely(atomic_read(&sqp->blocked))) {
4344 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4345 return SCSI_MLQUEUE_HOST_BUSY;
4346 }
4347 num_in_q = atomic_read(&devip->num_in_q);
4348 qdepth = cmnd->device->queue_depth;
4349 inject = 0;
4350 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
4351 if (scsi_result) {
4352 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4353 goto respond_in_thread;
4354 } else
4355 scsi_result = device_qfull_result;
4356 } else if (unlikely(sdebug_every_nth &&
4357 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4358 (scsi_result == 0))) {
4359 if ((num_in_q == (qdepth - 1)) &&
4360 (atomic_inc_return(&sdebug_a_tsf) >=
4361 abs(sdebug_every_nth))) {
4362 atomic_set(&sdebug_a_tsf, 0);
4363 inject = 1;
4364 scsi_result = device_qfull_result;
4365 }
4366 }
4367
4368 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4369 if (unlikely(k >= sdebug_max_queue)) {
4370 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4371 if (scsi_result)
4372 goto respond_in_thread;
4373 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4374 scsi_result = device_qfull_result;
4375 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4376 sdev_printk(KERN_INFO, sdp,
4377 "%s: max_queue=%d exceeded, %s\n",
4378 __func__, sdebug_max_queue,
4379 (scsi_result ? "status: TASK SET FULL" :
4380 "report: host busy"));
4381 if (scsi_result)
4382 goto respond_in_thread;
4383 else
4384 return SCSI_MLQUEUE_HOST_BUSY;
4385 }
4386 __set_bit(k, sqp->in_use_bm);
4387 atomic_inc(&devip->num_in_q);
4388 sqcp = &sqp->qc_arr[k];
4389 sqcp->a_cmnd = cmnd;
4390 cmnd->host_scribble = (unsigned char *)sqcp;
4391 sd_dp = sqcp->sd_dp;
4392 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4393 if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4394 setup_inject(sqp, sqcp);
4395 if (sd_dp == NULL) {
4396 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4397 if (sd_dp == NULL)
4398 return SCSI_MLQUEUE_HOST_BUSY;
4399 }
4400
4401 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4402 if (cmnd->result & SDEG_RES_IMMED_MASK) {
4403 /*
4404 * This is the F_DELAY_OVERR case. No delay.
4405 */
4406 cmnd->result &= ~SDEG_RES_IMMED_MASK;
4407 delta_jiff = ndelay = 0;
4408 }
4409 if (cmnd->result == 0 && scsi_result != 0)
4410 cmnd->result = scsi_result;
4411
4412 if (unlikely(sdebug_verbose && cmnd->result))
4413 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
4414 __func__, cmnd->result);
4415
4416 if (delta_jiff > 0 || ndelay > 0) {
4417 ktime_t kt;
4418
4419 if (delta_jiff > 0) {
4420 kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
4421 } else
4422 kt = ndelay;
4423 if (!sd_dp->init_hrt) {
4424 sd_dp->init_hrt = true;
4425 sqcp->sd_dp = sd_dp;
4426 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4427 HRTIMER_MODE_REL_PINNED);
4428 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4429 sd_dp->sqa_idx = sqp - sdebug_q_arr;
4430 sd_dp->qc_idx = k;
4431 }
4432 if (sdebug_statistics)
4433 sd_dp->issuing_cpu = raw_smp_processor_id();
4434 sd_dp->defer_t = SDEB_DEFER_HRT;
4435 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4436 } else { /* jdelay < 0, use work queue */
4437 if (!sd_dp->init_wq) {
4438 sd_dp->init_wq = true;
4439 sqcp->sd_dp = sd_dp;
4440 sd_dp->sqa_idx = sqp - sdebug_q_arr;
4441 sd_dp->qc_idx = k;
4442 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4443 }
4444 if (sdebug_statistics)
4445 sd_dp->issuing_cpu = raw_smp_processor_id();
4446 sd_dp->defer_t = SDEB_DEFER_WQ;
4447 schedule_work(&sd_dp->ew.work);
4448 }
4449 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4450 (scsi_result == device_qfull_result)))
4451 sdev_printk(KERN_INFO, sdp,
4452 "%s: num_in_q=%d +1, %s%s\n", __func__,
4453 num_in_q, (inject ? "<inject> " : ""),
4454 "status: TASK SET FULL");
4455 return 0;
4456
4457respond_in_thread: /* call back to mid-layer using invocation thread */
4458 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4459 cmnd->result &= ~SDEG_RES_IMMED_MASK;
4460 if (cmnd->result == 0 && scsi_result != 0)
4461 cmnd->result = scsi_result;
4462 cmnd->scsi_done(cmnd);
4463 return 0;
4464}
4465
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
   Descriptions of each parameter are in the MODULE_PARM_DESC() block
   further below. */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);

/* Standard module identification. */
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* One-line description per parameter, shown by modinfo(8). */
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz,
		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4584
#define SDEBUG_INFO_LEN 256
/* Static buffer returned by scsi_debug_info(); shared by all hosts. */
static char sdebug_info[SDEBUG_INFO_LEN];

/* Host template .info hook: build and return a one-off description string
 * (version plus key parameter values) for this host. */
static const char *scsi_debug_info(struct Scsi_Host *shp)
{
	int k;

	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
		      my_name, SDEBUG_VERSION, sdebug_version_date);
	if (k >= (SDEBUG_INFO_LEN - 1))
		return sdebug_info;	/* buffer already full */
	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
		  " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
		  "statistics", (int)sdebug_statistics);
	return sdebug_info;
}
4602
4603/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4604static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4605 int length)
4606{
4607 char arr[16];
4608 int opts;
4609 int minLen = length > 15 ? 15 : length;
4610
4611 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4612 return -EACCES;
4613 memcpy(arr, buffer, minLen);
4614 arr[minLen] = '\0';
4615 if (1 != sscanf(arr, "%d", &opts))
4616 return -EINVAL;
4617 sdebug_opts = opts;
4618 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4619 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4620 if (sdebug_every_nth != 0)
4621 tweak_cmnd_count();
4622 return length;
4623}
4624
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* per submit queue: show the occupied bit range, if any */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}
	return 0;
}
4669
/* sysfs 'delay' attribute: show current response delay (jiffies). */
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* block new submissions, then require every queue
			 * to be empty before changing the delay */
			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_jdelay = jdelay;
				/* delay and ndelay are mutually exclusive */
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
4709
/* sysfs 'ndelay' attribute: show current response delay (nanoseconds). */
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	/* accept 0 <= ndelay < 1 second (in nanoseconds) */
	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* block new submissions, then require every queue
			 * to be empty before changing the delay */
			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
4750
static ssize_t opts_show(struct device_driver *ddp, char *buf)
{
	/* Option flags are conventionally given and shown in hex. */
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
}

/* Accepts the option flags either as decimal or as "0x"-prefixed hex. */
static ssize_t opts_store(struct device_driver *ddp, const char *buf,
			  size_t count)
{
	int opts;
	char work[20];

	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &opts) == 0)
				goto opts_done;
		} else {
			if (kstrtoint(work, 10, &opts) == 0)
				goto opts_done;
		}
	}
	return -EINVAL;
opts_done:
	/* Derive the cached booleans that hot paths test instead of opts. */
	sdebug_opts = opts;
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(opts);
4780
4781static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4782{
4783 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4784}
4785static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4786 size_t count)
4787{
4788 int n;
4789
4790 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4791 sdebug_ptype = n;
4792 return count;
4793 }
4794 return -EINVAL;
4795}
4796static DRIVER_ATTR_RW(ptype);
4797
4798static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4799{
4800 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4801}
4802static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4803 size_t count)
4804{
4805 int n;
4806
4807 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4808 sdebug_dsense = n;
4809 return count;
4810 }
4811 return -EINVAL;
4812}
4813static DRIVER_ATTR_RW(dsense);
4814
4815static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4816{
4817 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4818}
4819static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4820 size_t count)
4821{
4822 int n;
4823
4824 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4825 n = (n > 0);
4826 sdebug_fake_rw = (sdebug_fake_rw > 0);
4827 if (sdebug_fake_rw != n) {
4828 if ((0 == n) && (NULL == fake_storep)) {
4829 unsigned long sz =
4830 (unsigned long)sdebug_dev_size_mb *
4831 1048576;
4832
4833 fake_storep = vmalloc(sz);
4834 if (NULL == fake_storep) {
4835 pr_err("out of memory, 9\n");
4836 return -ENOMEM;
4837 }
4838 memset(fake_storep, 0, sz);
4839 }
4840 sdebug_fake_rw = n;
4841 }
4842 return count;
4843 }
4844 return -EINVAL;
4845}
4846static DRIVER_ATTR_RW(fake_rw);
4847
4848static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4849{
4850 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4851}
4852static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4853 size_t count)
4854{
4855 int n;
4856
4857 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4858 sdebug_no_lun_0 = n;
4859 return count;
4860 }
4861 return -EINVAL;
4862}
4863static DRIVER_ATTR_RW(no_lun_0);
4864
4865static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4866{
4867 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4868}
4869static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4870 size_t count)
4871{
4872 int n;
4873
4874 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4875 sdebug_num_tgts = n;
4876 sdebug_max_tgts_luns();
4877 return count;
4878 }
4879 return -EINVAL;
4880}
4881static DRIVER_ATTR_RW(num_tgts);
4882
4883static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4884{
4885 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4886}
4887static DRIVER_ATTR_RO(dev_size_mb);
4888
4889static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4890{
4891 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4892}
4893static DRIVER_ATTR_RO(num_parts);
4894
4895static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4896{
4897 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4898}
4899static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4900 size_t count)
4901{
4902 int nth;
4903
4904 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4905 sdebug_every_nth = nth;
4906 if (nth && !sdebug_statistics) {
4907 pr_info("every_nth needs statistics=1, set it\n");
4908 sdebug_statistics = true;
4909 }
4910 tweak_cmnd_count();
4911 return count;
4912 }
4913 return -EINVAL;
4914}
4915static DRIVER_ATTR_RW(every_nth);
4916
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	/* Maximum number of LUNs presented per target. */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/*
			 * Raise a REPORTED LUNS DATA HAS CHANGED unit
			 * attention on every simulated device.
			 */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
4955
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	/* Per-queue command depth currently in effect. */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		/* k := highest in-use slot index across all queues */
		k = 0;
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		/*
		 * find_last_bit() == SDEBUG_CANQUEUE means no slot is in
		 * use; otherwise slots at or above the new depth must
		 * drain, tracked via retired_max_queue.
		 */
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
4991
4992static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4993{
4994 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
4995}
4996static DRIVER_ATTR_RO(no_uld);
4997
4998static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4999{
5000 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
5001}
5002static DRIVER_ATTR_RO(scsi_level);
5003
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	/* Virtual capacity in GiB (0 => capacity follows dev_size_mb). */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/*
			 * Raise a CAPACITY DATA HAS CHANGED unit attention
			 * on every simulated device.
			 */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
5038
5039static ssize_t add_host_show(struct device_driver *ddp, char *buf)
5040{
5041 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
5042}
5043
5044static int sdebug_add_adapter(void);
5045static void sdebug_remove_adapter(void);
5046
5047static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
5048 size_t count)
5049{
5050 int delta_hosts;
5051
5052 if (sscanf(buf, "%d", &delta_hosts) != 1)
5053 return -EINVAL;
5054 if (delta_hosts > 0) {
5055 do {
5056 sdebug_add_adapter();
5057 } while (--delta_hosts);
5058 } else if (delta_hosts < 0) {
5059 do {
5060 sdebug_remove_adapter();
5061 } while (++delta_hosts);
5062 }
5063 return count;
5064}
5065static DRIVER_ATTR_RW(add_host);
5066
5067static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
5068{
5069 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
5070}
5071static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
5072 size_t count)
5073{
5074 int n;
5075
5076 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5077 sdebug_vpd_use_hostno = n;
5078 return count;
5079 }
5080 return -EINVAL;
5081}
5082static DRIVER_ATTR_RW(vpd_use_hostno);
5083
5084static ssize_t statistics_show(struct device_driver *ddp, char *buf)
5085{
5086 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
5087}
5088static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
5089 size_t count)
5090{
5091 int n;
5092
5093 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
5094 if (n > 0)
5095 sdebug_statistics = true;
5096 else {
5097 clear_queue_stats();
5098 sdebug_statistics = false;
5099 }
5100 return count;
5101 }
5102 return -EINVAL;
5103}
5104static DRIVER_ATTR_RW(statistics);
5105
5106static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
5107{
5108 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
5109}
5110static DRIVER_ATTR_RO(sector_size);
5111
5112static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
5113{
5114 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
5115}
5116static DRIVER_ATTR_RO(submit_queues);
5117
5118static ssize_t dix_show(struct device_driver *ddp, char *buf)
5119{
5120 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
5121}
5122static DRIVER_ATTR_RO(dix);
5123
5124static ssize_t dif_show(struct device_driver *ddp, char *buf)
5125{
5126 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
5127}
5128static DRIVER_ATTR_RO(dif);
5129
5130static ssize_t guard_show(struct device_driver *ddp, char *buf)
5131{
5132 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
5133}
5134static DRIVER_ATTR_RO(guard);
5135
5136static ssize_t ato_show(struct device_driver *ddp, char *buf)
5137{
5138 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
5139}
5140static DRIVER_ATTR_RO(ato);
5141
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count;

	/* Without logical block provisioning everything is "mapped". */
	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/*
	 * Print the provisioning bitmap as a range list. PAGE_SIZE - 1
	 * leaves room for the '\n' appended below; scnprintf() guarantees
	 * count < PAGE_SIZE - 1, so both stores stay in bounds.
	 */
	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  (int)map_size, map_storep);
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
5158
5159static ssize_t removable_show(struct device_driver *ddp, char *buf)
5160{
5161 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
5162}
5163static ssize_t removable_store(struct device_driver *ddp, const char *buf,
5164 size_t count)
5165{
5166 int n;
5167
5168 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5169 sdebug_removable = (n > 0);
5170 return count;
5171 }
5172 return -EINVAL;
5173}
5174static DRIVER_ATTR_RW(removable);
5175
5176static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
5177{
5178 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
5179}
5180/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
5181static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
5182 size_t count)
5183{
5184 int n;
5185
5186 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5187 sdebug_host_lock = (n > 0);
5188 return count;
5189 }
5190 return -EINVAL;
5191}
5192static DRIVER_ATTR_RW(host_lock);
5193
5194static ssize_t strict_show(struct device_driver *ddp, char *buf)
5195{
5196 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
5197}
5198static ssize_t strict_store(struct device_driver *ddp, const char *buf,
5199 size_t count)
5200{
5201 int n;
5202
5203 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5204 sdebug_strict = (n > 0);
5205 return count;
5206 }
5207 return -EINVAL;
5208}
5209static DRIVER_ATTR_RW(strict);
5210
5211static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
5212{
5213 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
5214}
5215static DRIVER_ATTR_RO(uuid_ctl);
5216
5217static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
5218{
5219 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
5220}
5221static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
5222 size_t count)
5223{
5224 int ret, n;
5225
5226 ret = kstrtoint(buf, 0, &n);
5227 if (ret)
5228 return ret;
5229 sdebug_cdb_len = n;
5230 all_config_cdb_len();
5231 return count;
5232}
5233static DRIVER_ATTR_RW(cdb_len);
5234
5235
5236/* Note: The following array creates attribute files in the
5237 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
5238 files (over those found in the /sys/module/scsi_debug/parameters
5239 directory) is that auxiliary actions can be triggered when an attribute
5240 is changed. For example see: sdebug_add_host_store() above.
5241 */
5242
/* Driver attributes exported under /sys/bus/pseudo/drivers/scsi_debug/.
 * The array must stay NULL-terminated for ATTRIBUTE_GROUPS(). */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
5278
5279static struct device *pseudo_primary;
5280
/*
 * Module init: validate parameters, size and allocate the ramdisk and
 * optional DIF/provisioning stores, register the pseudo device/bus/driver,
 * then instantiate the requested number of simulated adapters.
 */
static int __init scsi_debug_init(void)
{
	unsigned long sz;
	int host_to_add;
	int k;
	int ret;

	atomic_set(&retired_max_queue, 0);

	/* ndelay must be sub-second; non-zero ndelay overrides jdelay. */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	/* Only power-of-two sector sizes between 512 and 4096 are supported. */
	switch (sdebug_sector_size) {
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}
	if (sdebug_max_luns > 256) {
		pr_warn("max_luns can be no more than 256, use default\n");
		sdebug_max_luns = DEF_MAX_LUNS;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}

	/* The zero-filled ramdisk backing READs and WRITEs. */
	if (sdebug_fake_rw == 0) {
		fake_storep = vmalloc(sz);
		if (NULL == fake_storep) {
			pr_err("out of memory, 1\n");
			ret = -ENOMEM;
			goto free_q_arr;
		}
		memset(fake_storep, 0, sz);
		if (sdebug_num_parts > 0)
			sdebug_build_parts(fake_storep, sz);
	}

	/* Optional per-sector protection information store (DIX). */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		dif_storep = vmalloc(dif_size);

		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);

		if (dif_storep == NULL) {
			pr_err("out of mem. (DIX)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		/* 0xff == escape value: PI is not checked on these sectors */
		memset(dif_storep, 0xff, dif_size);
	}

	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_vm;
		}

		/* one bit per provisioning block */
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));

		pr_info("%lu provisioning blocks\n", map_size);

		if (map_storep == NULL) {
			pr_err("out of mem. (MAP)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		bitmap_zero(map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(0, 2);
	}

	/* Register root device, pseudo bus and this driver, in that order. */
	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	/* sdebug_add_adapter() increments sdebug_add_host itself. */
	host_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < host_to_add; k++) {
		if (sdebug_add_adapter()) {
			pr_err("sdebug_add_adapter failed k=%d\n", k);
			break;
		}
	}

	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_add_host);

	return 0;

	/* Error unwind: reverse order of the registrations/allocations above. */
bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
5489
/* Module exit: drain queued commands, then tear down in reverse init order. */
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_add_host;

	stop_all_queued();
	free_all_queued();
	/* remove every adapter that was registered */
	for (; k; k--)
		sdebug_remove_adapter();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
	kfree(sdebug_q_arr);
}
5507
5508device_initcall(scsi_debug_init);
5509module_exit(scsi_debug_exit);
5510
/* Device-model release callback: frees the host when its refcount drops. */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
5518
5519static int sdebug_add_adapter(void)
5520{
5521 int k, devs_per_host;
5522 int error = 0;
5523 struct sdebug_host_info *sdbg_host;
5524 struct sdebug_dev_info *sdbg_devinfo, *tmp;
5525
5526 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5527 if (sdbg_host == NULL) {
5528 pr_err("out of memory at line %d\n", __LINE__);
5529 return -ENOMEM;
5530 }
5531
5532 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5533
5534 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5535 for (k = 0; k < devs_per_host; k++) {
5536 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5537 if (!sdbg_devinfo) {
5538 pr_err("out of memory at line %d\n", __LINE__);
5539 error = -ENOMEM;
5540 goto clean;
5541 }
5542 }
5543
5544 spin_lock(&sdebug_host_list_lock);
5545 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5546 spin_unlock(&sdebug_host_list_lock);
5547
5548 sdbg_host->dev.bus = &pseudo_lld_bus;
5549 sdbg_host->dev.parent = pseudo_primary;
5550 sdbg_host->dev.release = &sdebug_release_adapter;
5551 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5552
5553 error = device_register(&sdbg_host->dev);
5554
5555 if (error)
5556 goto clean;
5557
5558 ++sdebug_add_host;
5559 return error;
5560
5561clean:
5562 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5563 dev_list) {
5564 list_del(&sdbg_devinfo->dev_list);
5565 kfree(sdbg_devinfo);
5566 }
5567
5568 kfree(sdbg_host);
5569 return error;
5570}
5571
/* Unregister the most recently added simulated adapter, if any. */
static void sdebug_remove_adapter(void)
{
	struct sdebug_host_info *sdbg_host = NULL;

	/* Detach the newest host (list tail) under the list lock. */
	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		list_del(&sdbg_host->host_list);
	}
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	/* Final put frees sdbg_host via sdebug_release_adapter(). */
	device_unregister(&sdbg_host->dev);
	--sdebug_add_host;
}
5590
/*
 * .change_queue_depth handler: clamp the requested depth and apply it.
 * Returns the resulting queue depth, or -ENODEV if the device has no
 * sdebug private data yet.
 */
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	/* Freeze queues so num_in_q is a consistent snapshot. */
	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return -ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth < 1)
		qdepth = 1;
	/* allow to exceed max host qc_arr elements for testing */
	if (qdepth > SDEBUG_CANQUEUE + 10)
		qdepth = SDEBUG_CANQUEUE + 10;
	scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}
5618
/*
 * Decide whether to drop this command to fake a timeout. Only called
 * when sdebug_every_nth != 0 (the caller guards), so the modulo below
 * cannot divide by zero.
 */
static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		/* values below -1 behave as a one-shot trigger */
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true; /* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true; /* time out reads and writes */
	}
	return false;
}
5632
5633static bool fake_host_busy(struct scsi_cmnd *scp)
5634{
5635 return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
5636 (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5637}
5638
/*
 * .queuecommand handler: decode the CDB via opcode_info_arr, perform
 * generic checks (LUN range, unit attentions, strict CDB mask, stopped
 * state, injected errors), then schedule the matching resp_* function
 * with the configured delay.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	/* Optionally hex-dump the CDB for debugging. */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scp->request), b);
	}
	if (fake_host_busy(scp))
		return SCSI_MLQUEUE_HOST_BUSY;
	/* the REPORT LUNS well-known LUN is allowed past the range check */
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		/* disambiguate by service action when the table says so */
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		/* flag the highest set bit not allowed by the opcode's mask */
		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* report any pending unit attention first, unless exempt */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) {
		/*
		 * If any delay is active, want F_LONG_DELAY to be at least 1
		 * second and if sdebug_jdelay>0 want a long delay of that
		 * many seconds.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
5796
/* SCSI host template; can_queue and use_clustering may be adjusted at
 * probe time (see sdebug_driver_probe()). */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.use_clustering = 	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
5823
/*
 * Bus probe callback for the pseudo LLD bus: allocate and register one
 * simulated SCSI host for @dev.
 *
 * Returns 0 on success or -ENODEV if host allocation or registration
 * fails.  On success the new host has been scanned for devices.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	/*
	 * Patch the (shared) template before scsi_host_alloc() copies it:
	 * queue depth comes from the max_queue module parameter, and
	 * clustering is enabled only on request.
	 */
	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (sdebug_clustering)
		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
	/*
	 * hostdata only needs to hold a pointer back to sdbg_host, hence
	 * sizeof(sdbg_host) (pointer size), not sizeof(*sdbg_host).
	 */
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	/* Clamp the global submit_queues parameter to the CPU count. */
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/* Decide whether to tell scsi subsystem that we want mq */
	/* Following should give the same answer for each host */
	if (shost_use_blk_mq(hpnt))
		hpnt->nr_hw_queues = submit_queues;

	/* Cross-link host info and Scsi_Host via hostdata. */
	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	/* Leave room above this_id so the initiator id stays addressable. */
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	/*
	 * Map the dif/dix module parameters onto the host protection
	 * capability mask.
	 */
	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		/* No DIF; DIX type 0 (guard-only) may still be offered. */
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	/* guard==1 selects IP checksum guard tags, otherwise CRC. */
	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	/* Cache option-derived flags before the host goes live. */
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		/* drop the reference taken by scsi_host_alloc() */
		scsi_host_put(hpnt);
	} else
		scsi_scan_host(hpnt);

	return error;
}
5920
5921static int sdebug_driver_remove(struct device *dev)
5922{
5923 struct sdebug_host_info *sdbg_host;
5924 struct sdebug_dev_info *sdbg_devinfo, *tmp;
5925
5926 sdbg_host = to_sdebug_host(dev);
5927
5928 if (!sdbg_host) {
5929 pr_err("Unable to locate host info\n");
5930 return -ENODEV;
5931 }
5932
5933 scsi_remove_host(sdbg_host->shost);
5934
5935 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5936 dev_list) {
5937 list_del(&sdbg_devinfo->dev_list);
5938 kfree(sdbg_devinfo);
5939 }
5940
5941 scsi_host_put(sdbg_host->shost);
5942 return 0;
5943}
5944
/*
 * Bus match callback: every device on the pseudo bus is accepted by
 * every driver, so always report a match.
 */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
5950
/*
 * Pseudo bus that hosts the fake adapters.  Matching always succeeds
 * (see pseudo_lld_bus_match()), so probe/remove run for every device
 * registered on this bus.
 */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};