/*
 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright 2016 Microsemi Corporation
 * Copyright 2014-2015 PMC-Sierra, Inc.
 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/*
 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
 * with an optional trailing '-' followed by a byte value (0-255).
 */
#define HPSA_DRIVER_VERSION "3.4.20-170"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

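/*
 * Illustrative sketch (not part of the driver): the interval/count pairs
 * above bound polling loops of this shape, e.g. up to MAX_MODE_CHANGE_WAIT
 * iterations of MODE_CHANGE_WAIT_INTERVAL ms (about 20 s total) while
 * waiting for a doorbell bit to clear. Hypothetical helper; the real
 * waiters are hpsa_wait_for_mode_change_ack() and friends further down.
 */
static inline int example_poll_doorbell(void __iomem *doorbell, u32 bit)
{
	int i;

	for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
		if (!(readl(doorbell) & bit))
			return 0;	/* controller acknowledged */
		msleep(MODE_CHANGE_WAIT_INTERVAL);
	}
	return -ETIMEDOUT;	/* no acknowledgement within ~20 s */
}
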
/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* How long to wait before giving up on a command */
#define HPSA_EH_PTRAID_TIMEOUT (240 * HZ)

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("cciss");

static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5A_access},
	{0x40800E11, "Smart Array 5i", &SA5B_access},
	{0x40820E11, "Smart Array 532", &SA5B_access},
	{0x40830E11, "Smart Array 5312", &SA5B_access},
	{0x409A0E11, "Smart Array 641", &SA5A_access},
	{0x409B0E11, "Smart Array 642", &SA5A_access},
	{0x409C0E11, "Smart Array 6400", &SA5A_access},
	{0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
	{0x40910E11, "Smart Array 6i", &SA5A_access},
	{0x3225103C, "Smart Array P600", &SA5A_access},
	{0x3223103C, "Smart Array P800", &SA5A_access},
	{0x3234103C, "Smart Array P400", &SA5A_access},
	{0x3235103C, "Smart Array P400i", &SA5A_access},
	{0x3211103C, "Smart Array E200i", &SA5A_access},
	{0x3212103C, "Smart Array E200", &SA5A_access},
	{0x3213103C, "Smart Array E200i", &SA5A_access},
	{0x3214103C, "Smart Array E200i", &SA5A_access},
	{0x3215103C, "Smart Array E200i", &SA5A_access},
	{0x3237103C, "Smart Array E500", &SA5A_access},
	{0x323D103C, "Smart Array P700m", &SA5A_access},
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1920103C, "Smart Array P430i", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1925103C, "Smart Array P831", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
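
/*
 * Illustrative sketch (not part of the driver): each board_id above packs
 * the 16-bit PCI subsystem device ID into the high half and the 16-bit
 * subsystem vendor ID into the low half, e.g. 0x3241103C is subsystem
 * device 0x3241 from vendor 0x103C (HP). Hypothetical helper showing the
 * composition; the driver derives the real value in hpsa_lookup_board_id(),
 * declared further down.
 */
static inline u32 example_board_id(u16 subsys_device, u16 subsys_vendor)
{
	return ((u32)subsys_device << 16) | subsys_vendor;
}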

static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);

#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
	void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
	struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
	bool *legacy_board);
static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[],
	int reply_queue);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportExtendedLUNdata *buf, int bufsize);
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page);
static int hpsa_luns_changed(struct ctlr_info *h);
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
	struct hpsa_scsi_dev_t *dev,
	unsigned char *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
	u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
		/*
		 * Note: this REPORT_LUNS_CHANGED condition only occurs on
		 * external target (array) devices.
		 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)

static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
	return !device->physical_device;
}

static ssize_t raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%8phN\n", lunid);
}

static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t sas_address_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	sas_address = hdev->sas_address;
	spin_unlock_irqrestore(&h->lock, flags);

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);

	if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
		return snprintf(buf, 20, "%d\n", offload_enabled);
	else
		return snprintf(buf, 40, "%s\n",
				"Not applicable for a controller");
}

#define MAX_PATHS 8
static ssize_t path_info_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	unsigned char phys_connector[2];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->devlock, flags);
		return -ENODEV;
	}

	bay = hdev->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1<<i;
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"%s\n", active);
			continue;
		}

		box = hdev->box[i];
		memcpy(&phys_connector, &hdev->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';
		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"PORT: %.2s ",
				phys_connector);
		if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
			hdev->expose_device) {
			if (box == 0 || box == 0xFF) {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "%s\n", active);
	}

	spin_unlock_irqrestore(&h->devlock, flags);
	return output_len;
}

static ssize_t host_show_ctlr_num(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->ctlr);
}

static ssize_t host_show_legacy_board(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
}

static DEVICE_ATTR_RO(raid_level);
static DEVICE_ATTR_RO(lunid);
static DEVICE_ATTR_RO(unique_id);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR_RO(sas_address);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
	host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR_RO(path_info);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
	host_show_hp_ssd_smart_path_status,
	host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
	host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);
static DEVICE_ATTR(ctlr_num, S_IRUGO,
	host_show_ctlr_num, NULL);
static DEVICE_ATTR(legacy_board, S_IRUGO,
	host_show_legacy_board, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_path_info,
	&dev_attr_sas_address,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	&dev_attr_lockup_detected,
	&dev_attr_ctlr_num,
	&dev_attr_legacy_board,
	NULL,
};

#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_DRIVER +\
				 HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
	.module = THIS_MODULE,
	.name = HPSA,
	.proc_name = HPSA,
	.queuecommand = hpsa_scsi_queue_command,
	.scan_start = hpsa_scan_start,
	.scan_finished = hpsa_scan_finished,
	.change_queue_depth = hpsa_change_queue_depth,
	.this_id = -1,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl = hpsa_ioctl,
	.slave_alloc = hpsa_slave_alloc,
	.slave_configure = hpsa_slave_configure,
	.slave_destroy = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl = hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 2048,
	.no_write_same = 1,
};

static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
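
/*
 * Illustrative sketch (not part of the driver): the reply ring read by
 * next_command() above uses a phase bit. The controller posts entries
 * whose low bit equals the current wraparound phase, so an entry whose
 * low bit does not match rq->wraparound has not been written on this
 * pass of the ring. Hypothetical helper restating that validity test:
 */
static inline bool example_reply_entry_posted(u64 entry, u8 phase)
{
	return (entry & 1) == phase;	/* phase bit matches this pass */
}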

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
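
/*
 * Illustrative sketch (not part of the driver): how the normal performant
 * mode tag described above is composed. Bit 0 selects performant mode and
 * bits 1-3 carry the block fetch table entry; the command's index already
 * lives in the upper bits of busaddr. Hypothetical helper mirroring
 * set_performant_mode() below.
 */
static inline u32 example_performant_tag(u32 busaddr, u32 fetch_entry)
{
	return busaddr | 1 | (fetch_entry << 1);	/* bit 0 + bits 1-3 */
}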

/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
	int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vectors))
			return;
		c->Header.ReplyQueue = reply_queue;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
	struct CommandList *c,
	int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = reply_queue;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
	struct CommandList *c,
	int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = reply_queue;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
	struct CommandList *c,
	int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = reply_queue;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
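
/*
 * Illustrative sketch (not part of the driver): lockup detection samples
 * the controller's heartbeat counter once per heartbeat_sample_interval;
 * a counter that has not advanced between samples suggests a locked-up
 * controller, which is why flashing firmware widens the interval above.
 * Hypothetical helper restating that test; the real check lives in
 * detect_controller_lockup(), declared earlier.
 */
static inline bool example_heartbeat_stalled(u32 prev_sample, u32 new_sample)
{
	return prev_sample == new_sample;	/* no progress in one interval */
}
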
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
	struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
	struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	if (c->device)
		atomic_inc(&c->device->commands_outstanding);

	reply_queue = h->reply_map[raw_smp_processor_id()];
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
#define LABEL_SIZE 25
	char label[LABEL_SIZE];

	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
		return;

	switch (dev->devtype) {
	case TYPE_RAID:
		snprintf(label, LABEL_SIZE, "controller");
		break;
	case TYPE_ENCLOSURE:
		snprintf(label, LABEL_SIZE, "enclosure");
		break;
	case TYPE_DISK:
	case TYPE_ZBC:
		if (dev->external)
			snprintf(label, LABEL_SIZE, "external");
		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
			snprintf(label, LABEL_SIZE, "%s",
				raid_label[PHYSICAL_DRIVE]);
		else
			snprintf(label, LABEL_SIZE, "RAID-%s",
				dev->raid_level > RAID_UNKNOWN ? "?" :
				raid_label[dev->raid_level]);
		break;
	case TYPE_ROM:
		snprintf(label, LABEL_SIZE, "rom");
		break;
	case TYPE_TAPE:
		snprintf(label, LABEL_SIZE, "tape");
		break;
	case TYPE_MEDIUM_CHANGER:
		snprintf(label, LABEL_SIZE, "changer");
		break;
	default:
		snprintf(label, LABEL_SIZE, "UNKNOWN");
		break;
	}

	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			label,
			dev->offload_config ? '+' : '-',
			dev->offload_to_be_enabled ? '+' : '-',
			dev->expose_device);
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *device,
	struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, except for bytes 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* differ only in bytes 4 and 5? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
	return 0;
}

/*
 * Called during a scan operation.
 *
 * Update an entry in h->dev[] array.
 */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/*
	 * ioaccel_handle may have changed for a dual domain disk
	 */
	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before. If raid map data has changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->offload_to_be_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * ioaccel on until we can update h->dev[entry]->phys_disk[], but we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;

	/*
	 * turn ioaccel off immediately if told to do so.
	 */
	if (!new_entry->offload_to_be_enabled)
		h->dev[entry]->offload_enabled = 0;

	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;

	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
		return 1;
	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
		if (dev1->queue_depth != dev2->queue_depth)
			return 1;
	/*
	 * This can happen for dual domain devices. An active
	 * path change causes the ioaccel handle to change
	 *
	 * for example note the handle differences between p0 and p1
	 * Device                    WWN               ,WWN hash,Handle
	 * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
	 *	p1                   0x5000C5005FC4DAC9,0x6798C0,0x00040004
	 */
	if (dev1->ioaccel_handle != dev2->ioaccel_handle)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	if (needle == NULL)
		return DEVICE_NOT_FOUND;

	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return;

	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_NOT_AVAILABLE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}
1680
1681/*
 * Figure out the list of physical drive pointers for a logical drive with
 * RAID offload configured.
1684 */
1685static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1686 struct hpsa_scsi_dev_t *dev[], int ndevices,
1687 struct hpsa_scsi_dev_t *logical_drive)
1688{
1689 struct raid_map_data *map = &logical_drive->raid_map;
1690 struct raid_map_disk_data *dd = &map->data[0];
1691 int i, j;
1692 int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1693 le16_to_cpu(map->metadata_disks_per_row);
1694 int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1695 le16_to_cpu(map->layout_map_count) *
1696 total_disks_per_row;
1697 int nphys_disk = le16_to_cpu(map->layout_map_count) *
1698 total_disks_per_row;
1699 int qdepth;
1700
1701 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1702 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1703
1704 logical_drive->nphysical_disks = nraid_map_entries;
1705
1706 qdepth = 0;
1707 for (i = 0; i < nraid_map_entries; i++) {
1708 logical_drive->phys_disk[i] = NULL;
1709 if (!logical_drive->offload_config)
1710 continue;
1711 for (j = 0; j < ndevices; j++) {
1712 if (dev[j] == NULL)
1713 continue;
1714 if (dev[j]->devtype != TYPE_DISK &&
1715 dev[j]->devtype != TYPE_ZBC)
1716 continue;
1717 if (is_logical_device(dev[j]))
1718 continue;
1719 if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1720 continue;
1721
1722 logical_drive->phys_disk[i] = dev[j];
1723 if (i < nphys_disk)
1724 qdepth = min(h->nr_cmds, qdepth +
1725 logical_drive->phys_disk[i]->queue_depth);
1726 break;
1727 }
1728
		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded. In that case, the RAID
		 * map data will refer to a physical disk which isn't
		 * actually present. offload_enabled should already be 0
		 * in that case, but we turn it off here just in case.
		 */
1736 if (!logical_drive->phys_disk[i]) {
1737 dev_warn(&h->pdev->dev,
1738 "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
1739 __func__,
1740 h->scsi_host->host_no, logical_drive->bus,
1741 logical_drive->target, logical_drive->lun);
1742 logical_drive->offload_enabled = 0;
1743 logical_drive->offload_to_be_enabled = 0;
1744 logical_drive->queue_depth = 8;
1745 }
1746 }
1747 if (nraid_map_entries)
1748 /*
1749 * This is correct for reads, too high for full stripe writes,
1750 * way too high for partial stripe writes
1751 */
1752 logical_drive->queue_depth = qdepth;
1753 else {
1754 if (logical_drive->external)
1755 logical_drive->queue_depth = EXTERNAL_QD;
1756 else
1757 logical_drive->queue_depth = h->nr_cmds;
1758 }
1759}
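
/*
 * Illustrative figures for the sizing above (not taken from the source):
 * a volume with data_disks_per_row = 3, metadata_disks_per_row = 1,
 * row_cnt = 2 and layout_map_count = 1 gives total_disks_per_row = 4,
 * nraid_map_entries = 2 * 1 * 4 = 8 and nphys_disk = 1 * 4 = 4, so the
 * aggregate queue depth is the sum of the queue depths of the first
 * nphys_disk mapped members, capped at h->nr_cmds.
 */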
1760
1761static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1762 struct hpsa_scsi_dev_t *dev[], int ndevices)
1763{
1764 int i;
1765
1766 for (i = 0; i < ndevices; i++) {
1767 if (dev[i] == NULL)
1768 continue;
1769 if (dev[i]->devtype != TYPE_DISK &&
1770 dev[i]->devtype != TYPE_ZBC)
1771 continue;
1772 if (!is_logical_device(dev[i]))
1773 continue;
1774
1775 /*
1776 * If offload is currently enabled, the RAID map and
1777 * phys_disk[] assignment *better* not be changing
		 * because we would be changing ioaccel phys_disk[] pointers
		 * on an ioaccel volume while it is processing I/O requests.
1780 *
1781 * If an ioaccel volume status changed, initially because it was
1782 * re-configured and thus underwent a transformation, or
1783 * a drive failed, we would have received a state change
1784 * request and ioaccel should have been turned off. When the
1785 * transformation completes, we get another state change
1786 * request to turn ioaccel back on. In this case, we need
1787 * to update the ioaccel information.
1788 *
1789 * Thus: If it is not currently enabled, but will be after
1790 * the scan completes, make sure the ioaccel pointers
1791 * are up to date.
1792 */
1793
1794 if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
1795 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1796 }
1797}
1798
1799static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1800{
1801 int rc = 0;
1802
1803 if (!h->scsi_host)
1804 return 1;
1805
1806 if (is_logical_device(device)) /* RAID */
1807 rc = scsi_add_device(h->scsi_host, device->bus,
1808 device->target, device->lun);
1809 else /* HBA */
1810 rc = hpsa_add_sas_device(h->sas_host, device);
1811
1812 return rc;
1813}
1814
1815static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
1816 struct hpsa_scsi_dev_t *dev)
1817{
1818 int i;
1819 int count = 0;
1820
1821 for (i = 0; i < h->nr_cmds; i++) {
1822 struct CommandList *c = h->cmd_pool + i;
1823 int refcount = atomic_inc_return(&c->refcount);
1824
1825 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
1826 dev->scsi3addr)) {
1827 unsigned long flags;
1828
1829 spin_lock_irqsave(&h->lock, flags); /* Implied MB */
1830 if (!hpsa_is_cmd_idle(c))
1831 ++count;
1832 spin_unlock_irqrestore(&h->lock, flags);
1833 }
1834
1835 cmd_free(h, c);
1836 }
1837
1838 return count;
1839}
1840
1841#define NUM_WAIT 20
1842static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
1843 struct hpsa_scsi_dev_t *device)
1844{
1845 int cmds = 0;
1846 int waits = 0;
1847 int num_wait = NUM_WAIT;
1848
1849 if (device->external)
1850 num_wait = HPSA_EH_PTRAID_TIMEOUT;
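	/*
	 * Note that HPSA_EH_PTRAID_TIMEOUT is expressed in jiffies, while
	 * each pass of the loop below sleeps for one second, so for
	 * external devices the bound works out to (240 * HZ) iterations
	 * of one second each rather than 240 seconds.
	 */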
1851
1852 while (1) {
1853 cmds = hpsa_find_outstanding_commands_for_dev(h, device);
1854 if (cmds == 0)
1855 break;
1856 if (++waits > num_wait)
1857 break;
1858 msleep(1000);
1859 }
1860
1861 if (waits > num_wait) {
1862 dev_warn(&h->pdev->dev,
1863 "%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n",
1864 __func__,
1865 h->scsi_host->host_no,
1866 device->bus, device->target, device->lun, cmds);
1867 }
1868}
1869
1870static void hpsa_remove_device(struct ctlr_info *h,
1871 struct hpsa_scsi_dev_t *device)
1872{
1873 struct scsi_device *sdev = NULL;
1874
1875 if (!h->scsi_host)
1876 return;
1877
1878 /*
1879 * Allow for commands to drain
1880 */
1881 device->removed = 1;
1882 hpsa_wait_for_outstanding_commands_for_dev(h, device);
1883
1884 if (is_logical_device(device)) { /* RAID */
1885 sdev = scsi_device_lookup(h->scsi_host, device->bus,
1886 device->target, device->lun);
1887 if (sdev) {
1888 scsi_remove_device(sdev);
1889 scsi_device_put(sdev);
1890 } else {
1891 /*
1892 * We don't expect to get here. Future commands
1893 * to this device will get a selection timeout as
1894 * if the device were gone.
1895 */
1896 hpsa_show_dev_msg(KERN_WARNING, h, device,
1897 "didn't find device for removal.");
1898 }
1899 } else { /* HBA */
1900
1901 hpsa_remove_sas_device(device);
1902 }
1903}
1904
1905static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1906 struct hpsa_scsi_dev_t *sd[], int nsds)
1907{
	/* sd contains scsi3 addresses, devtypes, and inquiry
1909 * data. This function takes what's in sd to be the current
1910 * reality and updates h->dev[] to reflect that reality.
1911 */
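	/*
	 * Roughly, this proceeds in phases: (1) walk h->dev[] and remove,
	 * replace, or update entries against sd[]; (2) walk sd[] and add
	 * anything not already present in h->dev[]; (3) rebuild the
	 * logical drives' phys_disk[] pointers and latch offload_enabled;
	 * (4) after dropping the devlock, queue offline volumes for
	 * monitoring and notify the SCSI mid layer of removals/additions.
	 */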
1912 int i, entry, device_change, changes = 0;
1913 struct hpsa_scsi_dev_t *csd;
1914 unsigned long flags;
1915 struct hpsa_scsi_dev_t **added, **removed;
1916 int nadded, nremoved;
1917
1918 /*
	 * A reset can cause a device's status to change;
	 * re-schedule the scan to see what happened.
1921 */
1922 spin_lock_irqsave(&h->reset_lock, flags);
1923 if (h->reset_in_progress) {
1924 h->drv_req_rescan = 1;
1925 spin_unlock_irqrestore(&h->reset_lock, flags);
1926 return;
1927 }
1928 spin_unlock_irqrestore(&h->reset_lock, flags);
1929
1930 added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL);
1931 removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL);
1932
1933 if (!added || !removed) {
		dev_warn(&h->pdev->dev,
			"out of memory in adjust_hpsa_scsi_table\n");
1936 goto free_and_out;
1937 }
1938
1939 spin_lock_irqsave(&h->devlock, flags);
1940
1941 /* find any devices in h->dev[] that are not in
1942 * sd[] and remove them from h->dev[], and for any
1943 * devices which have changed, remove the old device
1944 * info and add the new device info.
1945 * If minor device attributes change, just update
1946 * the existing device structure.
1947 */
1948 i = 0;
1949 nremoved = 0;
1950 nadded = 0;
1951 while (i < h->ndevices) {
1952 csd = h->dev[i];
1953 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1954 if (device_change == DEVICE_NOT_FOUND) {
1955 changes++;
1956 hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1957 continue; /* remove ^^^, hence i not incremented */
1958 } else if (device_change == DEVICE_CHANGED) {
1959 changes++;
1960 hpsa_scsi_replace_entry(h, i, sd[entry],
1961 added, &nadded, removed, &nremoved);
1962 /* Set it to NULL to prevent it from being freed
1963 * at the bottom of hpsa_update_scsi_devices()
1964 */
1965 sd[entry] = NULL;
1966 } else if (device_change == DEVICE_UPDATED) {
1967 hpsa_scsi_update_entry(h, i, sd[entry]);
1968 }
1969 i++;
1970 }
1971
1972 /* Now, make sure every device listed in sd[] is also
1973 * listed in h->dev[], adding them if they aren't found
1974 */
1975
1976 for (i = 0; i < nsds; i++) {
1977 if (!sd[i]) /* if already added above. */
1978 continue;
1979
1980 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1981 * as the SCSI mid-layer does not handle such devices well.
1982 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1983 * at 160Hz, and prevents the system from coming up.
1984 */
1985 if (sd[i]->volume_offline) {
1986 hpsa_show_volume_status(h, sd[i]);
1987 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1988 continue;
1989 }
1990
1991 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1992 h->ndevices, &entry);
1993 if (device_change == DEVICE_NOT_FOUND) {
1994 changes++;
1995 if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
1996 break;
1997 sd[i] = NULL; /* prevent from being freed later. */
1998 } else if (device_change == DEVICE_CHANGED) {
1999 /* should never happen... */
2000 changes++;
2001 dev_warn(&h->pdev->dev,
2002 "device unexpectedly changed.\n");
2003 /* but if it does happen, we just ignore that device */
2004 }
2005 }
2006 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
2007
2008 /*
2009 * Now that h->dev[]->phys_disk[] is coherent, we can enable
2010 * any logical drives that need it enabled.
2011 *
2012 * The raid map should be current by now.
2013 *
2014 * We are updating the device list used for I/O requests.
2015 */
2016 for (i = 0; i < h->ndevices; i++) {
2017 if (h->dev[i] == NULL)
2018 continue;
2019 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
2020 }
2021
2022 spin_unlock_irqrestore(&h->devlock, flags);
2023
2024 /* Monitor devices which are in one of several NOT READY states to be
2025 * brought online later. This must be done without holding h->devlock,
2026 * so don't touch h->dev[]
2027 */
2028 for (i = 0; i < nsds; i++) {
2029 if (!sd[i]) /* if already added above. */
2030 continue;
2031 if (sd[i]->volume_offline)
2032 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
2033 }
2034
	/* Don't notify the SCSI mid layer of any changes the first time
	 * through (or if there are no changes); scsi_scan_host() will do
	 * it later, on the first pass.
	 */
2039 if (!changes)
2040 goto free_and_out;
2041
2042 /* Notify scsi mid layer of any removed devices */
2043 for (i = 0; i < nremoved; i++) {
2044 if (removed[i] == NULL)
2045 continue;
2046 if (removed[i]->expose_device)
2047 hpsa_remove_device(h, removed[i]);
2048 kfree(removed[i]);
2049 removed[i] = NULL;
2050 }
2051
2052 /* Notify scsi mid layer of any added devices */
2053 for (i = 0; i < nadded; i++) {
2054 int rc = 0;
2055
2056 if (added[i] == NULL)
2057 continue;
2058 if (!(added[i]->expose_device))
2059 continue;
2060 rc = hpsa_add_device(h, added[i]);
2061 if (!rc)
2062 continue;
2063 dev_warn(&h->pdev->dev,
			"addition failed %d, device not added.\n", rc);
2065 /* now we have to remove it from h->dev,
2066 * since it didn't get added to scsi mid layer
2067 */
2068 fixup_botched_add(h, added[i]);
2069 h->drv_req_rescan = 1;
2070 }
2071
2072free_and_out:
2073 kfree(added);
2074 kfree(removed);
2075}
2076
2077/*
2078 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
2079 * Assume's h->devlock is held.
2080 */
2081static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
2082 int bus, int target, int lun)
2083{
2084 int i;
2085 struct hpsa_scsi_dev_t *sd;
2086
2087 for (i = 0; i < h->ndevices; i++) {
2088 sd = h->dev[i];
2089 if (sd->bus == bus && sd->target == target && sd->lun == lun)
2090 return sd;
2091 }
2092 return NULL;
2093}
2094
2095static int hpsa_slave_alloc(struct scsi_device *sdev)
2096{
2097 struct hpsa_scsi_dev_t *sd = NULL;
2098 unsigned long flags;
2099 struct ctlr_info *h;
2100
2101 h = sdev_to_hba(sdev);
2102 spin_lock_irqsave(&h->devlock, flags);
2103 if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
2104 struct scsi_target *starget;
2105 struct sas_rphy *rphy;
2106
2107 starget = scsi_target(sdev);
2108 rphy = target_to_rphy(starget);
2109 sd = hpsa_find_device_by_sas_rphy(h, rphy);
2110 if (sd) {
2111 sd->target = sdev_id(sdev);
2112 sd->lun = sdev->lun;
2113 }
2114 }
2115 if (!sd)
2116 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
2117 sdev_id(sdev), sdev->lun);
2118
2119 if (sd && sd->expose_device) {
2120 atomic_set(&sd->ioaccel_cmds_out, 0);
2121 sdev->hostdata = sd;
2122 } else
2123 sdev->hostdata = NULL;
2124 spin_unlock_irqrestore(&h->devlock, flags);
2125 return 0;
2126}
2127
2128/* configure scsi device based on internal per-device structure */
2129static int hpsa_slave_configure(struct scsi_device *sdev)
2130{
2131 struct hpsa_scsi_dev_t *sd;
2132 int queue_depth;
2133
2134 sd = sdev->hostdata;
2135 sdev->no_uld_attach = !sd || !sd->expose_device;
2136
2137 if (sd) {
2138 sd->was_removed = 0;
2139 if (sd->external) {
2140 queue_depth = EXTERNAL_QD;
2141 sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT;
2142 blk_queue_rq_timeout(sdev->request_queue,
2143 HPSA_EH_PTRAID_TIMEOUT);
2144 } else {
2145 queue_depth = sd->queue_depth != 0 ?
2146 sd->queue_depth : sdev->host->can_queue;
2147 }
2148 } else
2149 queue_depth = sdev->host->can_queue;
2150
2151 scsi_change_queue_depth(sdev, queue_depth);
2152
2153 return 0;
2154}
2155
2156static void hpsa_slave_destroy(struct scsi_device *sdev)
2157{
2158 struct hpsa_scsi_dev_t *hdev = NULL;
2159
2160 hdev = sdev->hostdata;
2161
2162 if (hdev)
2163 hdev->was_removed = 1;
2164}
2165
2166static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2167{
2168 int i;
2169
2170 if (!h->ioaccel2_cmd_sg_list)
2171 return;
2172 for (i = 0; i < h->nr_cmds; i++) {
2173 kfree(h->ioaccel2_cmd_sg_list[i]);
2174 h->ioaccel2_cmd_sg_list[i] = NULL;
2175 }
2176 kfree(h->ioaccel2_cmd_sg_list);
2177 h->ioaccel2_cmd_sg_list = NULL;
2178}
2179
2180static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2181{
2182 int i;
2183
2184 if (h->chainsize <= 0)
2185 return 0;
2186
2187 h->ioaccel2_cmd_sg_list =
2188 kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
2189 GFP_KERNEL);
2190 if (!h->ioaccel2_cmd_sg_list)
2191 return -ENOMEM;
2192 for (i = 0; i < h->nr_cmds; i++) {
2193 h->ioaccel2_cmd_sg_list[i] =
2194 kmalloc_array(h->maxsgentries,
2195 sizeof(*h->ioaccel2_cmd_sg_list[i]),
2196 GFP_KERNEL);
2197 if (!h->ioaccel2_cmd_sg_list[i])
2198 goto clean;
2199 }
2200 return 0;
2201
2202clean:
2203 hpsa_free_ioaccel2_sg_chain_blocks(h);
2204 return -ENOMEM;
2205}
2206
2207static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2208{
2209 int i;
2210
2211 if (!h->cmd_sg_list)
2212 return;
2213 for (i = 0; i < h->nr_cmds; i++) {
2214 kfree(h->cmd_sg_list[i]);
2215 h->cmd_sg_list[i] = NULL;
2216 }
2217 kfree(h->cmd_sg_list);
2218 h->cmd_sg_list = NULL;
2219}
2220
2221static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2222{
2223 int i;
2224
2225 if (h->chainsize <= 0)
2226 return 0;
2227
2228 h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
2229 GFP_KERNEL);
2230 if (!h->cmd_sg_list)
2231 return -ENOMEM;
2232
2233 for (i = 0; i < h->nr_cmds; i++) {
2234 h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
2235 sizeof(*h->cmd_sg_list[i]),
2236 GFP_KERNEL);
2237 if (!h->cmd_sg_list[i])
2238 goto clean;
2239
2240 }
2241 return 0;
2242
2243clean:
2244 hpsa_free_sg_chain_blocks(h);
2245 return -ENOMEM;
2246}
2247
2248static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2249 struct io_accel2_cmd *cp, struct CommandList *c)
2250{
2251 struct ioaccel2_sg_element *chain_block;
2252 u64 temp64;
2253 u32 chain_size;
2254
2255 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2256 chain_size = le32_to_cpu(cp->sg[0].length);
2257 temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
2258 DMA_TO_DEVICE);
2259 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2260 /* prevent subsequent unmapping */
2261 cp->sg->address = 0;
2262 return -1;
2263 }
2264 cp->sg->address = cpu_to_le64(temp64);
2265 return 0;
2266}
2267
2268static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2269 struct io_accel2_cmd *cp)
2270{
2271 struct ioaccel2_sg_element *chain_sg;
2272 u64 temp64;
2273 u32 chain_size;
2274
2275 chain_sg = cp->sg;
2276 temp64 = le64_to_cpu(chain_sg->address);
2277 chain_size = le32_to_cpu(cp->sg[0].length);
2278 dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
2279}
2280
2281static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2282 struct CommandList *c)
2283{
2284 struct SGDescriptor *chain_sg, *chain_block;
2285 u64 temp64;
2286 u32 chain_len;
2287
2288 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2289 chain_block = h->cmd_sg_list[c->cmdindex];
2290 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2291 chain_len = sizeof(*chain_sg) *
2292 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2293 chain_sg->Len = cpu_to_le32(chain_len);
2294 temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
2295 DMA_TO_DEVICE);
2296 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2297 /* prevent subsequent unmapping */
2298 chain_sg->Addr = cpu_to_le64(0);
2299 return -1;
2300 }
2301 chain_sg->Addr = cpu_to_le64(temp64);
2302 return 0;
2303}
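
/*
 * Illustrative figures for the chaining above (not taken from the
 * source): with max_cmd_sg_entries = 32 and a 40-element scatter list,
 * descriptors 0..30 are stored inline, SG[31] becomes the chain pointer,
 * Header.SGTotal is 41 (the 40 entries plus the chain descriptor), and
 * the chain block holds the remaining 41 - 32 = 9 descriptors.
 */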
2304
2305static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2306 struct CommandList *c)
2307{
2308 struct SGDescriptor *chain_sg;
2309
2310 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2311 return;
2312
2313 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2314 dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
2315 le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
2316}
2317
2318
2319/* Decode the various types of errors on ioaccel2 path.
2320 * Return 1 for any error that should generate a RAID path retry.
2321 * Return 0 for errors that don't require a RAID path retry.
2322 */
2323static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2324 struct CommandList *c,
2325 struct scsi_cmnd *cmd,
2326 struct io_accel2_cmd *c2,
2327 struct hpsa_scsi_dev_t *dev)
2328{
2329 int data_len;
2330 int retry = 0;
2331 u32 ioaccel2_resid = 0;
2332
2333 switch (c2->error_data.serv_response) {
2334 case IOACCEL2_SERV_RESPONSE_COMPLETE:
2335 switch (c2->error_data.status) {
2336 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2337 break;
2338 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2339 cmd->result |= SAM_STAT_CHECK_CONDITION;
2340 if (c2->error_data.data_present !=
2341 IOACCEL2_SENSE_DATA_PRESENT) {
2342 memset(cmd->sense_buffer, 0,
2343 SCSI_SENSE_BUFFERSIZE);
2344 break;
2345 }
2346 /* copy the sense data */
2347 data_len = c2->error_data.sense_data_len;
2348 if (data_len > SCSI_SENSE_BUFFERSIZE)
2349 data_len = SCSI_SENSE_BUFFERSIZE;
2350 if (data_len > sizeof(c2->error_data.sense_data_buff))
2351 data_len =
2352 sizeof(c2->error_data.sense_data_buff);
2353 memcpy(cmd->sense_buffer,
2354 c2->error_data.sense_data_buff, data_len);
2355 retry = 1;
2356 break;
2357 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2358 retry = 1;
2359 break;
2360 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2361 retry = 1;
2362 break;
2363 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2364 retry = 1;
2365 break;
2366 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2367 retry = 1;
2368 break;
2369 default:
2370 retry = 1;
2371 break;
2372 }
2373 break;
2374 case IOACCEL2_SERV_RESPONSE_FAILURE:
2375 switch (c2->error_data.status) {
2376 case IOACCEL2_STATUS_SR_IO_ERROR:
2377 case IOACCEL2_STATUS_SR_IO_ABORTED:
2378 case IOACCEL2_STATUS_SR_OVERRUN:
2379 retry = 1;
2380 break;
2381 case IOACCEL2_STATUS_SR_UNDERRUN:
2382 cmd->result = (DID_OK << 16); /* host byte */
2383 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2384 ioaccel2_resid = get_unaligned_le32(
2385 &c2->error_data.resid_cnt[0]);
2386 scsi_set_resid(cmd, ioaccel2_resid);
2387 break;
2388 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2389 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2390 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2391 /*
			 * Did an HBA disk disappear? We will eventually
			 * get a state change event from the controller, but
			 * in the meantime we need to tell the OS that the
			 * HBA disk is no longer there and stop sending I/O
			 * to it. This allows a re-inserted disk to get the
			 * same device node.
2398 */
2399 if (dev->physical_device && dev->expose_device) {
2400 cmd->result = DID_NO_CONNECT << 16;
2401 dev->removed = 1;
2402 h->drv_req_rescan = 1;
2403 dev_warn(&h->pdev->dev,
2404 "%s: device is gone!\n", __func__);
2405 } else
2406 /*
2407 * Retry by sending down the RAID path.
				 * We will get an event from the controller
				 * to trigger a rescan regardless.
2410 */
2411 retry = 1;
2412 break;
2413 default:
2414 retry = 1;
2415 }
2416 break;
2417 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2418 break;
2419 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2420 break;
2421 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2422 retry = 1;
2423 break;
2424 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2425 break;
2426 default:
2427 retry = 1;
2428 break;
2429 }
2430
2431 if (dev->in_reset)
2432 retry = 0;
2433
2434 return retry; /* retry on raid path? */
2435}
2436
2437static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2438 struct CommandList *c)
2439{
2440 struct hpsa_scsi_dev_t *dev = c->device;
2441
2442 /*
2443 * Reset c->scsi_cmd here so that the reset handler will know
2444 * this command has completed. Then, check to see if the handler is
2445 * waiting for this command, and, if so, wake it.
2446 */
2447 c->scsi_cmd = SCSI_CMD_IDLE;
2448 mb(); /* Declare command idle before checking for pending events. */
2449 if (dev) {
2450 atomic_dec(&dev->commands_outstanding);
2451 if (dev->in_reset &&
2452 atomic_read(&dev->commands_outstanding) <= 0)
2453 wake_up_all(&h->event_sync_wait_queue);
2454 }
2455}
2456
2457static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2458 struct CommandList *c)
2459{
2460 hpsa_cmd_resolve_events(h, c);
2461 cmd_tagged_free(h, c);
2462}
2463
2464static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2465 struct CommandList *c, struct scsi_cmnd *cmd)
2466{
2467 hpsa_cmd_resolve_and_free(h, c);
2468 if (cmd && cmd->scsi_done)
2469 cmd->scsi_done(cmd);
2470}
2471
2472static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2473{
2474 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2475 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2476}
2477
2478static void process_ioaccel2_completion(struct ctlr_info *h,
2479 struct CommandList *c, struct scsi_cmnd *cmd,
2480 struct hpsa_scsi_dev_t *dev)
2481{
2482 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2483
2484 /* check for good status */
2485 if (likely(c2->error_data.serv_response == 0 &&
2486 c2->error_data.status == 0))
2487 return hpsa_cmd_free_and_done(h, c, cmd);
2488
2489 /*
2490 * Any RAID offload error results in retry which will use
2491 * the normal I/O path so the controller can handle whatever is
2492 * wrong.
2493 */
2494 if (is_logical_device(dev) &&
2495 c2->error_data.serv_response ==
2496 IOACCEL2_SERV_RESPONSE_FAILURE) {
2497 if (c2->error_data.status ==
2498 IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
2499 dev->offload_enabled = 0;
2500 dev->offload_to_be_enabled = 0;
2501 }
2502
2503 if (dev->in_reset) {
2504 cmd->result = DID_RESET << 16;
2505 return hpsa_cmd_free_and_done(h, c, cmd);
2506 }
2507
2508 return hpsa_retry_cmd(h, c);
2509 }
2510
2511 if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
2512 return hpsa_retry_cmd(h, c);
2513
2514 return hpsa_cmd_free_and_done(h, c, cmd);
2515}
2516
2517/* Returns 0 on success, < 0 otherwise. */
2518static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2519 struct CommandList *cp)
2520{
2521 u8 tmf_status = cp->err_info->ScsiStatus;
2522
2523 switch (tmf_status) {
2524 case CISS_TMF_COMPLETE:
2525 /*
		 * CISS_TMF_COMPLETE never happens; instead,
		 * ei->CommandStatus == 0 for this case.
2528 */
2529 case CISS_TMF_SUCCESS:
2530 return 0;
2531 case CISS_TMF_INVALID_FRAME:
2532 case CISS_TMF_NOT_SUPPORTED:
2533 case CISS_TMF_FAILED:
2534 case CISS_TMF_WRONG_LUN:
2535 case CISS_TMF_OVERLAPPED_TAG:
2536 break;
2537 default:
2538 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2539 tmf_status);
2540 break;
2541 }
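	/*
	 * A failing TMF status is encoded as a negative return value so
	 * that callers can distinguish it from the 0 returned on success.
	 */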
2542 return -tmf_status;
2543}
2544
2545static void complete_scsi_command(struct CommandList *cp)
2546{
2547 struct scsi_cmnd *cmd;
2548 struct ctlr_info *h;
2549 struct ErrorInfo *ei;
2550 struct hpsa_scsi_dev_t *dev;
2551 struct io_accel2_cmd *c2;
2552
2553 u8 sense_key;
2554 u8 asc; /* additional sense code */
2555 u8 ascq; /* additional sense code qualifier */
2556 unsigned long sense_data_size;
2557
2558 ei = cp->err_info;
2559 cmd = cp->scsi_cmd;
2560 h = cp->h;
2561
2562 if (!cmd->device) {
2563 cmd->result = DID_NO_CONNECT << 16;
2564 return hpsa_cmd_free_and_done(h, cp, cmd);
2565 }
2566
2567 dev = cmd->device->hostdata;
2568 if (!dev) {
2569 cmd->result = DID_NO_CONNECT << 16;
2570 return hpsa_cmd_free_and_done(h, cp, cmd);
2571 }
2572 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2573
2574 scsi_dma_unmap(cmd); /* undo the DMA mappings */
2575 if ((cp->cmd_type == CMD_SCSI) &&
2576 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2577 hpsa_unmap_sg_chain_block(h, cp);
2578
2579 if ((cp->cmd_type == CMD_IOACCEL2) &&
2580 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2581 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2582
2583 cmd->result = (DID_OK << 16); /* host byte */
2584 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2585
2586 /* SCSI command has already been cleaned up in SML */
2587 if (dev->was_removed) {
2588 hpsa_cmd_resolve_and_free(h, cp);
2589 return;
2590 }
2591
2592 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
2593 if (dev->physical_device && dev->expose_device &&
2594 dev->removed) {
2595 cmd->result = DID_NO_CONNECT << 16;
2596 return hpsa_cmd_free_and_done(h, cp, cmd);
2597 }
2598 if (likely(cp->phys_disk != NULL))
2599 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2600 }
2601
2602 /*
2603 * We check for lockup status here as it may be set for
2604 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
	 * fail_all_outstanding_cmds()
2606 */
2607 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2608 /* DID_NO_CONNECT will prevent a retry */
2609 cmd->result = DID_NO_CONNECT << 16;
2610 return hpsa_cmd_free_and_done(h, cp, cmd);
2611 }
2612
2613 if (cp->cmd_type == CMD_IOACCEL2)
2614 return process_ioaccel2_completion(h, cp, cmd, dev);
2615
2616 scsi_set_resid(cmd, ei->ResidualCnt);
2617 if (ei->CommandStatus == 0)
2618 return hpsa_cmd_free_and_done(h, cp, cmd);
2619
2620 /* For I/O accelerator commands, copy over some fields to the normal
2621 * CISS header used below for error handling.
2622 */
2623 if (cp->cmd_type == CMD_IOACCEL1) {
2624 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2625 cp->Header.SGList = scsi_sg_count(cmd);
2626 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2627 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2628 IOACCEL1_IOFLAGS_CDBLEN_MASK;
2629 cp->Header.tag = c->tag;
2630 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2631 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2632
2633 /* Any RAID offload error results in retry which will use
2634 * the normal I/O path so the controller can handle whatever's
2635 * wrong.
2636 */
2637 if (is_logical_device(dev)) {
2638 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2639 dev->offload_enabled = 0;
2640 return hpsa_retry_cmd(h, cp);
2641 }
2642 }
2643
2644 /* an error has occurred */
2645 switch (ei->CommandStatus) {
2646
2647 case CMD_TARGET_STATUS:
2648 cmd->result |= ei->ScsiStatus;
2649 /* copy the sense data */
2650 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2651 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2652 else
2653 sense_data_size = sizeof(ei->SenseInfo);
2654 if (ei->SenseLen < sense_data_size)
2655 sense_data_size = ei->SenseLen;
2656 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2657 if (ei->ScsiStatus)
2658 decode_sense_data(ei->SenseInfo, sense_data_size,
2659 &sense_key, &asc, &ascq);
2660 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2661 switch (sense_key) {
2662 case ABORTED_COMMAND:
2663 cmd->result |= DID_SOFT_ERROR << 16;
2664 break;
2665 case UNIT_ATTENTION:
2666 if (asc == 0x3F && ascq == 0x0E)
2667 h->drv_req_rescan = 1;
2668 break;
2669 case ILLEGAL_REQUEST:
2670 if (asc == 0x25 && ascq == 0x00) {
2671 dev->removed = 1;
2672 cmd->result = DID_NO_CONNECT << 16;
2673 }
2674 break;
2675 }
2676 break;
2677 }
2678 /* Problem was not a check condition
2679 * Pass it up to the upper layers...
2680 */
2681 if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev,
				"cp %p has status 0x%x Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
2688 } else { /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev,
				"cp %p SCSI status was 0. Returning no connection.\n",
				cp);
2691
2692 /* Ordinarily, this case should never happen,
2693 * but there is a bug in some released firmware
2694 * revisions that allows it to happen if, for
2695 * example, a 4100 backplane loses power and
2696 * the tape drive is in it. We assume that
2697 * it's a fatal error of some kind because we
2698 * can't show that it wasn't. We will make it
2699 * look like selection timeout since that is
2700 * the most common reason for this to occur,
2701 * and it's severe enough.
2702 */
2703
2704 cmd->result = DID_NO_CONNECT << 16;
2705 }
2706 break;
2707
2708 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2709 break;
2710 case CMD_DATA_OVERRUN:
2711 dev_warn(&h->pdev->dev,
2712 "CDB %16phN data overrun\n", cp->Request.CDB);
2713 break;
2714 case CMD_INVALID: {
2715 /* print_bytes(cp, sizeof(*cp), 1, 0);
2716 print_cmd(cp); */
2717 /* We get CMD_INVALID if you address a non-existent device
2718 * instead of a selection timeout (no response). You will
2719 * see this if you yank out a drive, then try to access it.
2720 * This is kind of a shame because it means that any other
2721 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2722 * missing target. */
2723 cmd->result = DID_NO_CONNECT << 16;
2724 }
2725 break;
2726 case CMD_PROTOCOL_ERR:
2727 cmd->result = DID_ERROR << 16;
2728 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2729 cp->Request.CDB);
2730 break;
2731 case CMD_HARDWARE_ERR:
2732 cmd->result = DID_ERROR << 16;
2733 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2734 cp->Request.CDB);
2735 break;
2736 case CMD_CONNECTION_LOST:
2737 cmd->result = DID_ERROR << 16;
2738 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2739 cp->Request.CDB);
2740 break;
2741 case CMD_ABORTED:
2742 cmd->result = DID_ABORT << 16;
2743 break;
2744 case CMD_ABORT_FAILED:
2745 cmd->result = DID_ERROR << 16;
2746 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2747 cp->Request.CDB);
2748 break;
2749 case CMD_UNSOLICITED_ABORT:
2750 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2751 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2752 cp->Request.CDB);
2753 break;
2754 case CMD_TIMEOUT:
2755 cmd->result = DID_TIME_OUT << 16;
2756 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2757 cp->Request.CDB);
2758 break;
2759 case CMD_UNABORTABLE:
2760 cmd->result = DID_ERROR << 16;
2761 dev_warn(&h->pdev->dev, "Command unabortable\n");
2762 break;
2763 case CMD_TMF_STATUS:
2764 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2765 cmd->result = DID_ERROR << 16;
2766 break;
2767 case CMD_IOACCEL_DISABLED:
2768 /* This only handles the direct pass-through case since RAID
2769 * offload is handled above. Just attempt a retry.
2770 */
2771 cmd->result = DID_SOFT_ERROR << 16;
2772 dev_warn(&h->pdev->dev,
2773 "cp %p had HP SSD Smart Path error\n", cp);
2774 break;
2775 default:
2776 cmd->result = DID_ERROR << 16;
2777 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2778 cp, ei->CommandStatus);
2779 }
2780
2781 return hpsa_cmd_free_and_done(h, cp, cmd);
2782}
2783
2784static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
2785 int sg_used, enum dma_data_direction data_direction)
2786{
2787 int i;
2788
2789 for (i = 0; i < sg_used; i++)
2790 dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
2791 le32_to_cpu(c->SG[i].Len),
2792 data_direction);
2793}
2794
2795static int hpsa_map_one(struct pci_dev *pdev,
2796 struct CommandList *cp,
2797 unsigned char *buf,
2798 size_t buflen,
2799 enum dma_data_direction data_direction)
2800{
2801 u64 addr64;
2802
2803 if (buflen == 0 || data_direction == DMA_NONE) {
2804 cp->Header.SGList = 0;
2805 cp->Header.SGTotal = cpu_to_le16(0);
2806 return 0;
2807 }
2808
2809 addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
2810 if (dma_mapping_error(&pdev->dev, addr64)) {
2811 /* Prevent subsequent unmap of something never mapped */
2812 cp->Header.SGList = 0;
2813 cp->Header.SGTotal = cpu_to_le16(0);
2814 return -1;
2815 }
2816 cp->SG[0].Addr = cpu_to_le64(addr64);
2817 cp->SG[0].Len = cpu_to_le32(buflen);
2818 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2819 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
2820 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
2821 return 0;
2822}
2823
2824#define NO_TIMEOUT ((unsigned long) -1)
2825#define DEFAULT_TIMEOUT 30000 /* milliseconds */
2826static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2827 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2828{
2829 DECLARE_COMPLETION_ONSTACK(wait);
2830
2831 c->waiting = &wait;
2832 __enqueue_cmd_and_start_io(h, c, reply_queue);
2833 if (timeout_msecs == NO_TIMEOUT) {
2834 /* TODO: get rid of this no-timeout thing */
2835 wait_for_completion_io(&wait);
2836 return IO_OK;
2837 }
2838 if (!wait_for_completion_io_timeout(&wait,
2839 msecs_to_jiffies(timeout_msecs))) {
2840 dev_warn(&h->pdev->dev, "Command timed out.\n");
2841 return -ETIMEDOUT;
2842 }
2843 return IO_OK;
2844}
2845
2846static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2847 int reply_queue, unsigned long timeout_msecs)
2848{
2849 if (unlikely(lockup_detected(h))) {
2850 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2851 return IO_OK;
2852 }
2853 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2854}
2855
2856static u32 lockup_detected(struct ctlr_info *h)
2857{
2858 int cpu;
2859 u32 rc, *lockup_detected;
2860
2861 cpu = get_cpu();
2862 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2863 rc = *lockup_detected;
2864 put_cpu();
2865 return rc;
2866}
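
/*
 * The lockup flag is kept per-cpu; get_cpu()/put_cpu() pin the caller
 * while the local copy is read. Reading any one CPU's copy suffices
 * because the flag is set for every CPU when a lockup is declared.
 */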
2867
2868#define MAX_DRIVER_CMD_RETRIES 25
2869static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2870 struct CommandList *c, enum dma_data_direction data_direction,
2871 unsigned long timeout_msecs)
2872{
2873 int backoff_time = 10, retry_count = 0;
2874 int rc;
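
	/*
	 * Illustrative retry schedule (not taken from the source): the
	 * first three retries happen back to back; from the fourth retry
	 * on, the loop sleeps 10 ms, 20 ms, 40 ms, ... doubling until the
	 * delay reaches at least one second (it tops out at 1280 ms), for
	 * at most MAX_DRIVER_CMD_RETRIES retries overall.
	 */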
2875
2876 do {
2877 memset(c->err_info, 0, sizeof(*c->err_info));
2878 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2879 timeout_msecs);
2880 if (rc)
2881 break;
2882 retry_count++;
2883 if (retry_count > 3) {
2884 msleep(backoff_time);
2885 if (backoff_time < 1000)
2886 backoff_time *= 2;
2887 }
2888 } while ((check_for_unit_attention(h, c) ||
2889 check_for_busy(h, c)) &&
2890 retry_count <= MAX_DRIVER_CMD_RETRIES);
2891 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2892 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2893 rc = -EIO;
2894 return rc;
2895}
2896
2897static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2898 struct CommandList *c)
2899{
2900 const u8 *cdb = c->Request.CDB;
2901 const u8 *lun = c->Header.LUN.LunAddrBytes;
2902
2903 dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
2904 txt, lun, cdb);
2905}
2906
2907static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2908 struct CommandList *cp)
2909{
2910 const struct ErrorInfo *ei = cp->err_info;
2911 struct device *d = &cp->h->pdev->dev;
2912 u8 sense_key, asc, ascq;
2913 int sense_len;
2914
2915 switch (ei->CommandStatus) {
2916 case CMD_TARGET_STATUS:
2917 if (ei->SenseLen > sizeof(ei->SenseInfo))
2918 sense_len = sizeof(ei->SenseInfo);
2919 else
2920 sense_len = ei->SenseLen;
2921 decode_sense_data(ei->SenseInfo, sense_len,
2922 &sense_key, &asc, &ascq);
2923 hpsa_print_cmd(h, "SCSI status", cp);
2924 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2925 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2926 sense_key, asc, ascq);
2927 else
2928 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2929 if (ei->ScsiStatus == 0)
			dev_warn(d,
				"SCSI status is abnormally zero. (probably indicates selection timeout reported incorrectly due to a known firmware bug, circa July, 2001.)\n");
2934 break;
2935 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2936 break;
2937 case CMD_DATA_OVERRUN:
2938 hpsa_print_cmd(h, "overrun condition", cp);
2939 break;
2940 case CMD_INVALID: {
2941 /* controller unfortunately reports SCSI passthru's
2942 * to non-existent targets as invalid commands.
2943 */
2944 hpsa_print_cmd(h, "invalid command", cp);
2945 dev_warn(d, "probably means device no longer present\n");
2946 }
2947 break;
2948 case CMD_PROTOCOL_ERR:
2949 hpsa_print_cmd(h, "protocol error", cp);
2950 break;
2951 case CMD_HARDWARE_ERR:
2952 hpsa_print_cmd(h, "hardware error", cp);
2953 break;
2954 case CMD_CONNECTION_LOST:
2955 hpsa_print_cmd(h, "connection lost", cp);
2956 break;
2957 case CMD_ABORTED:
2958 hpsa_print_cmd(h, "aborted", cp);
2959 break;
2960 case CMD_ABORT_FAILED:
2961 hpsa_print_cmd(h, "abort failed", cp);
2962 break;
2963 case CMD_UNSOLICITED_ABORT:
2964 hpsa_print_cmd(h, "unsolicited abort", cp);
2965 break;
2966 case CMD_TIMEOUT:
2967 hpsa_print_cmd(h, "timed out", cp);
2968 break;
2969 case CMD_UNABORTABLE:
2970 hpsa_print_cmd(h, "unabortable", cp);
2971 break;
2972 case CMD_CTLR_LOCKUP:
2973 hpsa_print_cmd(h, "controller lockup detected", cp);
2974 break;
2975 default:
2976 hpsa_print_cmd(h, "unknown status", cp);
2977 dev_warn(d, "Unknown command status %x\n",
2978 ei->CommandStatus);
2979 }
2980}
2981
2982static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
2983 u8 page, u8 *buf, size_t bufsize)
2984{
2985 int rc = IO_OK;
2986 struct CommandList *c;
2987 struct ErrorInfo *ei;
2988
2989 c = cmd_alloc(h);
2990 if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
2991 page, scsi3addr, TYPE_CMD)) {
2992 rc = -1;
2993 goto out;
2994 }
2995 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
2996 NO_TIMEOUT);
2997 if (rc)
2998 goto out;
2999 ei = c->err_info;
3000 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3001 hpsa_scsi_interpret_error(h, c);
3002 rc = -1;
3003 }
3004out:
3005 cmd_free(h, c);
3006 return rc;
3007}
3008
3009static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
3010 u8 *scsi3addr)
3011{
3012 u8 *buf;
3013 u64 sa = 0;
3014 int rc = 0;
3015
3016 buf = kzalloc(1024, GFP_KERNEL);
3017 if (!buf)
3018 return 0;
3019
3020 rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
3021 buf, 1024);
3022
3023 if (rc)
3024 goto out;
3025
3026 sa = get_unaligned_be64(buf+12);
3027
3028out:
3029 kfree(buf);
3030 return sa;
3031}
3032
3033static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
3034 u16 page, unsigned char *buf,
3035 unsigned char bufsize)
3036{
3037 int rc = IO_OK;
3038 struct CommandList *c;
3039 struct ErrorInfo *ei;
3040
3041 c = cmd_alloc(h);
3042
3043 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
3044 page, scsi3addr, TYPE_CMD)) {
3045 rc = -1;
3046 goto out;
3047 }
3048 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3049 NO_TIMEOUT);
3050 if (rc)
3051 goto out;
3052 ei = c->err_info;
3053 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3054 hpsa_scsi_interpret_error(h, c);
3055 rc = -1;
3056 }
3057out:
3058 cmd_free(h, c);
3059 return rc;
3060}
3061
3062static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3063 u8 reset_type, int reply_queue)
3064{
3065 int rc = IO_OK;
3066 struct CommandList *c;
3067 struct ErrorInfo *ei;
3068
3069 c = cmd_alloc(h);
3070 c->device = dev;
3071
3072 /* fill_cmd can't fail here, no data buffer to map. */
3073 (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG);
3074 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
3075 if (rc) {
3076 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
3077 goto out;
3078 }
3079 /* no unmap needed here because no data xfer. */
3080
3081 ei = c->err_info;
3082 if (ei->CommandStatus != 0) {
3083 hpsa_scsi_interpret_error(h, c);
3084 rc = -1;
3085 }
3086out:
3087 cmd_free(h, c);
3088 return rc;
3089}
3090
3091static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
3092 struct hpsa_scsi_dev_t *dev,
3093 unsigned char *scsi3addr)
3094{
3095 int i;
3096 bool match = false;
3097 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
3098 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
3099
3100 if (hpsa_is_cmd_idle(c))
3101 return false;
3102
3103 switch (c->cmd_type) {
3104 case CMD_SCSI:
3105 case CMD_IOCTL_PEND:
3106 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
3107 sizeof(c->Header.LUN.LunAddrBytes));
3108 break;
3109
3110 case CMD_IOACCEL1:
3111 case CMD_IOACCEL2:
3112 if (c->phys_disk == dev) {
3113 /* HBA mode match */
3114 match = true;
3115 } else {
3116 /* Possible RAID mode -- check each phys dev. */
3117 /* FIXME: Do we need to take out a lock here? If
3118 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
3119 * instead. */
3120 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3121 /* FIXME: an alternate test might be
3122 *
3123 * match = dev->phys_disk[i]->ioaccel_handle
3124 * == c2->scsi_nexus; */
3125 match = dev->phys_disk[i] == c->phys_disk;
3126 }
3127 }
3128 break;
3129
3130 case IOACCEL2_TMF:
3131 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3132 match = dev->phys_disk[i]->ioaccel_handle ==
3133 le32_to_cpu(ac->it_nexus);
3134 }
3135 break;
3136
3137 case 0: /* The command is in the middle of being initialized. */
3138 match = false;
3139 break;
3140
3141 default:
3142 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
3143 c->cmd_type);
3144 BUG();
3145 }
3146
3147 return match;
3148}
3149
3150static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3151 u8 reset_type, int reply_queue)
3152{
3153 int rc = 0;
3154
3155 /* We can really only handle one reset at a time */
3156 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
3157 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
3158 return -EINTR;
3159 }
3160
3161 rc = hpsa_send_reset(h, dev, reset_type, reply_queue);
3162 if (!rc) {
3163 /* incremented by sending the reset request */
3164 atomic_dec(&dev->commands_outstanding);
3165 wait_event(h->event_sync_wait_queue,
3166 atomic_read(&dev->commands_outstanding) <= 0 ||
3167 lockup_detected(h));
3168 }
3169
3170 if (unlikely(lockup_detected(h))) {
3171 dev_warn(&h->pdev->dev,
3172 "Controller lockup detected during reset wait\n");
3173 rc = -ENODEV;
3174 }
3175
3176 if (!rc)
3177 rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0);
3178
3179 mutex_unlock(&h->reset_mutex);
3180 return rc;
3181}
3182
3183static void hpsa_get_raid_level(struct ctlr_info *h,
3184 unsigned char *scsi3addr, unsigned char *raid_level)
3185{
3186 int rc;
3187 unsigned char *buf;
3188
3189 *raid_level = RAID_UNKNOWN;
3190 buf = kzalloc(64, GFP_KERNEL);
3191 if (!buf)
3192 return;
3193
3194 if (!hpsa_vpd_page_supported(h, scsi3addr,
3195 HPSA_VPD_LV_DEVICE_GEOMETRY))
3196 goto exit;
3197
3198 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3199 HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
3200
3201 if (rc == 0)
3202 *raid_level = buf[8];
3203 if (*raid_level > RAID_UNKNOWN)
3204 *raid_level = RAID_UNKNOWN;
3205exit:
3206 kfree(buf);
3207 return;
3208}
3209
3210#define HPSA_MAP_DEBUG
3211#ifdef HPSA_MAP_DEBUG
3212static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
3213 struct raid_map_data *map_buff)
3214{
3215 struct raid_map_disk_data *dd = &map_buff->data[0];
3216 int map, row, col;
3217 u16 map_cnt, row_cnt, disks_per_row;
3218
3219 if (rc != 0)
3220 return;
3221
3222 /* Show details only if debugging has been activated. */
3223 if (h->raid_offload_debug < 2)
3224 return;
3225
3226 dev_info(&h->pdev->dev, "structure_size = %u\n",
3227 le32_to_cpu(map_buff->structure_size));
3228 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
3229 le32_to_cpu(map_buff->volume_blk_size));
3230 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
3231 le64_to_cpu(map_buff->volume_blk_cnt));
3232 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
3233 map_buff->phys_blk_shift);
3234 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
3235 map_buff->parity_rotation_shift);
3236 dev_info(&h->pdev->dev, "strip_size = %u\n",
3237 le16_to_cpu(map_buff->strip_size));
3238 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
3239 le64_to_cpu(map_buff->disk_starting_blk));
3240 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
3241 le64_to_cpu(map_buff->disk_blk_cnt));
3242 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
3243 le16_to_cpu(map_buff->data_disks_per_row));
3244 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
3245 le16_to_cpu(map_buff->metadata_disks_per_row));
3246 dev_info(&h->pdev->dev, "row_cnt = %u\n",
3247 le16_to_cpu(map_buff->row_cnt));
3248 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
3249 le16_to_cpu(map_buff->layout_map_count));
3250 dev_info(&h->pdev->dev, "flags = 0x%x\n",
3251 le16_to_cpu(map_buff->flags));
3252 dev_info(&h->pdev->dev, "encryption = %s\n",
3253 le16_to_cpu(map_buff->flags) &
3254 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
3255 dev_info(&h->pdev->dev, "dekindex = %u\n",
3256 le16_to_cpu(map_buff->dekindex));
3257 map_cnt = le16_to_cpu(map_buff->layout_map_count);
3258 for (map = 0; map < map_cnt; map++) {
3259 dev_info(&h->pdev->dev, "Map%u:\n", map);
3260 row_cnt = le16_to_cpu(map_buff->row_cnt);
3261 for (row = 0; row < row_cnt; row++) {
3262 dev_info(&h->pdev->dev, " Row%u:\n", row);
3263 disks_per_row =
3264 le16_to_cpu(map_buff->data_disks_per_row);
3265 for (col = 0; col < disks_per_row; col++, dd++)
3266 dev_info(&h->pdev->dev,
3267 " D%02u: h=0x%04x xor=%u,%u\n",
3268 col, dd->ioaccel_handle,
3269 dd->xor_mult[0], dd->xor_mult[1]);
3270 disks_per_row =
3271 le16_to_cpu(map_buff->metadata_disks_per_row);
3272 for (col = 0; col < disks_per_row; col++, dd++)
3273 dev_info(&h->pdev->dev,
3274 " M%02u: h=0x%04x xor=%u,%u\n",
3275 col, dd->ioaccel_handle,
3276 dd->xor_mult[0], dd->xor_mult[1]);
3277 }
3278 }
3279}
3280#else
3281static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
3282 __attribute__((unused)) int rc,
3283 __attribute__((unused)) struct raid_map_data *map_buff)
3284{
3285}
3286#endif
3287
3288static int hpsa_get_raid_map(struct ctlr_info *h,
3289 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3290{
3291 int rc = 0;
3292 struct CommandList *c;
3293 struct ErrorInfo *ei;
3294
3295 c = cmd_alloc(h);
3296
3297 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3298 sizeof(this_device->raid_map), 0,
3299 scsi3addr, TYPE_CMD)) {
3300 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3301 cmd_free(h, c);
3302 return -1;
3303 }
3304 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3305 NO_TIMEOUT);
3306 if (rc)
3307 goto out;
3308 ei = c->err_info;
3309 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3310 hpsa_scsi_interpret_error(h, c);
3311 rc = -1;
3312 goto out;
3313 }
3314 cmd_free(h, c);
3315
3316 /* @todo in the future, dynamically allocate RAID map memory */
3317 if (le32_to_cpu(this_device->raid_map.structure_size) >
3318 sizeof(this_device->raid_map)) {
3319 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3320 rc = -1;
3321 }
3322 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3323 return rc;
3324out:
3325 cmd_free(h, c);
3326 return rc;
3327}
3328
3329static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3330 unsigned char scsi3addr[], u16 bmic_device_index,
3331 struct bmic_sense_subsystem_info *buf, size_t bufsize)
3332{
3333 int rc = IO_OK;
3334 struct CommandList *c;
3335 struct ErrorInfo *ei;
3336
3337 c = cmd_alloc(h);
3338
3339 rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
3340 0, RAID_CTLR_LUNID, TYPE_CMD);
3341 if (rc)
3342 goto out;
3343
3344 c->Request.CDB[2] = bmic_device_index & 0xff;
3345 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3346
3347 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3348 NO_TIMEOUT);
3349 if (rc)
3350 goto out;
3351 ei = c->err_info;
3352 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3353 hpsa_scsi_interpret_error(h, c);
3354 rc = -1;
3355 }
3356out:
3357 cmd_free(h, c);
3358 return rc;
3359}
3360
3361static int hpsa_bmic_id_controller(struct ctlr_info *h,
3362 struct bmic_identify_controller *buf, size_t bufsize)
3363{
3364 int rc = IO_OK;
3365 struct CommandList *c;
3366 struct ErrorInfo *ei;
3367
3368 c = cmd_alloc(h);
3369
3370 rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3371 0, RAID_CTLR_LUNID, TYPE_CMD);
3372 if (rc)
3373 goto out;
3374
3375 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3376 NO_TIMEOUT);
3377 if (rc)
3378 goto out;
3379 ei = c->err_info;
3380 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3381 hpsa_scsi_interpret_error(h, c);
3382 rc = -1;
3383 }
3384out:
3385 cmd_free(h, c);
3386 return rc;
3387}
3388
3389static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3390 unsigned char scsi3addr[], u16 bmic_device_index,
3391 struct bmic_identify_physical_device *buf, size_t bufsize)
3392{
3393 int rc = IO_OK;
3394 struct CommandList *c;
3395 struct ErrorInfo *ei;
3396
3397 c = cmd_alloc(h);
3398 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3399 0, RAID_CTLR_LUNID, TYPE_CMD);
3400 if (rc)
3401 goto out;
3402
3403 c->Request.CDB[2] = bmic_device_index & 0xff;
3404 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3405
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
						NO_TIMEOUT);
	if (rc)
		goto out;
3408 ei = c->err_info;
3409 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3410 hpsa_scsi_interpret_error(h, c);
3411 rc = -1;
3412 }
3413out:
3414 cmd_free(h, c);
3415
3416 return rc;
3417}
3418
3419/*
3420 * get enclosure information
3421 * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
3422 * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
3423 * Uses id_physical_device to determine the box_index.
3424 */
3425static void hpsa_get_enclosure_info(struct ctlr_info *h,
3426 unsigned char *scsi3addr,
3427 struct ReportExtendedLUNdata *rlep, int rle_index,
3428 struct hpsa_scsi_dev_t *encl_dev)
3429{
3430 int rc = -1;
3431 struct CommandList *c = NULL;
3432 struct ErrorInfo *ei = NULL;
3433 struct bmic_sense_storage_box_params *bssbp = NULL;
3434 struct bmic_identify_physical_device *id_phys = NULL;
3435 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3436 u16 bmic_device_index = 0;
3437
3438 encl_dev->eli =
3439 hpsa_get_enclosure_logical_identifier(h, scsi3addr);
3440
3441 bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
3442
3443 if (encl_dev->target == -1 || encl_dev->lun == -1) {
3444 rc = IO_OK;
3445 goto out;
3446 }
3447
3448 if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
3449 rc = IO_OK;
3450 goto out;
3451 }
3452
3453 bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
3454 if (!bssbp)
3455 goto out;
3456
3457 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3458 if (!id_phys)
3459 goto out;
3460
3461 rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
3462 id_phys, sizeof(*id_phys));
3463 if (rc) {
3464 dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
			__func__, rc, bmic_device_index);
3466 goto out;
3467 }
3468
3469 c = cmd_alloc(h);
3470
3471 rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
3472 sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
3473
3474 if (rc)
3475 goto out;
3476
3477 if (id_phys->phys_connector[1] == 'E')
3478 c->Request.CDB[5] = id_phys->box_index;
3479 else
3480 c->Request.CDB[5] = 0;
3481
3482 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3483 NO_TIMEOUT);
3484 if (rc)
3485 goto out;
3486
3487 ei = c->err_info;
3488 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3489 rc = -1;
3490 goto out;
3491 }
3492
3493 encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
3494 memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
3495 bssbp->phys_connector, sizeof(bssbp->phys_connector));
3496
3497 rc = IO_OK;
3498out:
3499 kfree(bssbp);
3500 kfree(id_phys);
3501
3502 if (c)
3503 cmd_free(h, c);
3504
3505 if (rc != IO_OK)
3506 hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
3507 "Error, could not get enclosure information");
3508}
3509
3510static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
3511 unsigned char *scsi3addr)
3512{
3513 struct ReportExtendedLUNdata *physdev;
3514 u32 nphysicals;
3515 u64 sa = 0;
3516 int i;
3517
3518 physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
3519 if (!physdev)
3520 return 0;
3521
3522 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3523 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3524 kfree(physdev);
3525 return 0;
3526 }
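	/* LUNListLength is a byte count; each extended LUN entry is 24 bytes. */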
3527 nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
3528
3529 for (i = 0; i < nphysicals; i++)
3530 if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
3531 sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
3532 break;
3533 }
3534
3535 kfree(physdev);
3536
3537 return sa;
3538}
3539
3540static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3541 struct hpsa_scsi_dev_t *dev)
3542{
3543 int rc;
3544 u64 sa = 0;
3545
3546 if (is_hba_lunid(scsi3addr)) {
3547 struct bmic_sense_subsystem_info *ssi;
3548
3549 ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
3550 if (!ssi)
3551 return;
3552
3553 rc = hpsa_bmic_sense_subsystem_information(h,
3554 scsi3addr, 0, ssi, sizeof(*ssi));
3555 if (rc == 0) {
3556 sa = get_unaligned_be64(ssi->primary_world_wide_id);
3557 h->sas_address = sa;
3558 }
3559
3560 kfree(ssi);
3561 } else
3562 sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
3563
3564 dev->sas_address = sa;
3565}
3566
3567static void hpsa_ext_ctrl_present(struct ctlr_info *h,
3568 struct ReportExtendedLUNdata *physdev)
3569{
3570 u32 nphysicals;
3571 int i;
3572
3573 if (h->discovery_polling)
3574 return;
3575
3576 nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;
3577
3578 for (i = 0; i < nphysicals; i++) {
3579 if (physdev->LUN[i].device_type ==
3580 BMIC_DEVICE_TYPE_CONTROLLER
3581 && !is_hba_lunid(physdev->LUN[i].lunid)) {
3582 dev_info(&h->pdev->dev,
3583 "External controller present, activate discovery polling and disable rld caching\n");
3584 hpsa_disable_rld_caching(h);
3585 h->discovery_polling = 1;
3586 break;
3587 }
3588 }
3589}
3590
3591/* Determine whether the device supports a given VPD inquiry page */
3592static bool hpsa_vpd_page_supported(struct ctlr_info *h,
3593 unsigned char scsi3addr[], u8 page)
3594{
3595 int rc;
3596 int i;
3597 int pages;
3598 unsigned char *buf, bufsize;
3599
3600 buf = kzalloc(256, GFP_KERNEL);
3601 if (!buf)
3602 return false;
3603
3604 /* Get the size of the page list first */
3605 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3606 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3607 buf, HPSA_VPD_HEADER_SZ);
3608 if (rc != 0)
3609 goto exit_unsupported;
3610 pages = buf[3];
3611 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3612 bufsize = pages + HPSA_VPD_HEADER_SZ;
3613 else
3614 bufsize = 255;
3615
3616 /* Get the whole VPD page list */
3617 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3618 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3619 buf, bufsize);
3620 if (rc != 0)
3621 goto exit_unsupported;
3622
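	/*
	 * In the VPD page 0x00 response, byte 3 holds the number of
	 * supported pages and the page codes follow from byte 4 on.
	 */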
3623 pages = buf[3];
3624 for (i = 1; i <= pages; i++)
3625 if (buf[3 + i] == page)
3626 goto exit_supported;
3627exit_unsupported:
3628 kfree(buf);
3629 return false;
3630exit_supported:
3631 kfree(buf);
3632 return true;
3633}
3634
3635/*
3636 * Called during a scan operation.
3637 * Sets ioaccel status on the new device list, not the existing device list
3638 *
3639 * The device list used during I/O will be updated later in
3640 * adjust_hpsa_scsi_table.
3641 */
3642static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3643 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3644{
3645 int rc;
3646 unsigned char *buf;
3647 u8 ioaccel_status;
3648
3649 this_device->offload_config = 0;
3650 this_device->offload_enabled = 0;
3651 this_device->offload_to_be_enabled = 0;
3652
3653 buf = kzalloc(64, GFP_KERNEL);
3654 if (!buf)
3655 return;
3656 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3657 goto out;
3658 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3659 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3660 if (rc != 0)
3661 goto out;
3662
3663#define IOACCEL_STATUS_BYTE 4
3664#define OFFLOAD_CONFIGURED_BIT 0x01
3665#define OFFLOAD_ENABLED_BIT 0x02
3666 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3667 this_device->offload_config =
3668 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3669 if (this_device->offload_config) {
3670 this_device->offload_to_be_enabled =
3671 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3672 if (hpsa_get_raid_map(h, scsi3addr, this_device))
3673 this_device->offload_to_be_enabled = 0;
3674 }
3675
3676out:
3677 kfree(buf);
3678 return;
3679}
3680
3681/* Get the device id from inquiry page 0x83 */
3682static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3683 unsigned char *device_id, int index, int buflen)
3684{
3685 int rc;
3686 unsigned char *buf;
3687
3688 /* Does controller have VPD for device id? */
3689 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
3690 return 1; /* not supported */
3691
3692 buf = kzalloc(64, GFP_KERNEL);
3693 if (!buf)
3694 return -ENOMEM;
3695
3696 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3697 HPSA_VPD_LV_DEVICE_ID, buf, 64);
3698 if (rc == 0) {
3699 if (buflen > 16)
3700 buflen = 16;
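		/*
		 * The unique ID starts 8 bytes into the returned VPD page;
		 * at most 16 bytes of it are kept.
		 */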
3701 memcpy(device_id, &buf[8], buflen);
3702 }
3703
3704 kfree(buf);
3705
3706 return rc; /*0 - got id, otherwise, didn't */
3707}
3708
3709static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3710 void *buf, int bufsize,
3711 int extended_response)
3712{
3713 int rc = IO_OK;
3714 struct CommandList *c;
3715 unsigned char scsi3addr[8];
3716 struct ErrorInfo *ei;
3717
3718 c = cmd_alloc(h);
3719
3720 /* address the controller */
3721 memset(scsi3addr, 0, sizeof(scsi3addr));
3722 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3723 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3724 rc = -EAGAIN;
3725 goto out;
3726 }
3727 if (extended_response)
3728 c->Request.CDB[1] = extended_response;
3729 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3730 NO_TIMEOUT);
3731 if (rc)
3732 goto out;
3733 ei = c->err_info;
3734 if (ei->CommandStatus != 0 &&
3735 ei->CommandStatus != CMD_DATA_UNDERRUN) {
3736 hpsa_scsi_interpret_error(h, c);
3737 rc = -EIO;
3738 } else {
3739 struct ReportLUNdata *rld = buf;
3740
3741 if (rld->extended_response_flag != extended_response) {
3742 if (!h->legacy_board) {
3743 dev_err(&h->pdev->dev,
3744 "report luns requested format %u, got %u\n",
3745 extended_response,
3746 rld->extended_response_flag);
3747 rc = -EINVAL;
3748 } else
3749 rc = -EOPNOTSUPP;
3750 }
3751 }
3752out:
3753 cmd_free(h, c);
3754 return rc;
3755}
3756
3757static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3758 struct ReportExtendedLUNdata *buf, int bufsize)
3759{
3760 int rc;
3761 struct ReportLUNdata *lbuf;
3762
3763 rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3764 HPSA_REPORT_PHYS_EXTENDED);
3765 if (!rc || rc != -EOPNOTSUPP)
3766 return rc;
3767
3768 /* REPORT PHYS EXTENDED is not supported */
3769 lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
3770 if (!lbuf)
3771 return -ENOMEM;
3772
3773 rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
3774 if (!rc) {
3775 int i;
3776 u32 nphys;
3777
3778 /* Copy ReportLUNdata header */
3779 memcpy(buf, lbuf, 8);
3780 nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
3781 for (i = 0; i < nphys; i++)
3782 memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
3783 }
3784 kfree(lbuf);
3785 return rc;
3786}
3787
3788static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3789 struct ReportLUNdata *buf, int bufsize)
3790{
3791 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3792}
3793
3794static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3795 int bus, int target, int lun)
3796{
3797 device->bus = bus;
3798 device->target = target;
3799 device->lun = lun;
3800}
3801
3802/* Use VPD inquiry to get details of volume status */
3803static int hpsa_get_volume_status(struct ctlr_info *h,
3804 unsigned char scsi3addr[])
3805{
3806 int rc;
3807 int status;
3808 int size;
3809 unsigned char *buf;
3810
3811 buf = kzalloc(64, GFP_KERNEL);
3812 if (!buf)
3813 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3814
3815 /* Does controller have VPD for logical volume status? */
3816 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3817 goto exit_failed;
3818
3819 /* Get the size of the VPD return buffer */
3820 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3821 buf, HPSA_VPD_HEADER_SZ);
3822 if (rc != 0)
3823 goto exit_failed;
3824 size = buf[3];
3825
3826 /* Now get the whole VPD buffer */
3827 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3828 buf, size + HPSA_VPD_HEADER_SZ);
3829 if (rc != 0)
3830 goto exit_failed;
3831 status = buf[4]; /* status byte */
3832
3833 kfree(buf);
3834 return status;
3835exit_failed:
3836 kfree(buf);
3837 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3838}
3839
3840/* Determine offline status of a volume.
3841 * Return either:
3842 * 0 (not offline)
3843 * 0xff (offline for unknown reasons)
3844 * # (integer code indicating one of several NOT READY states
3845 * describing why a volume is to be kept offline)
3846 */
3847static unsigned char hpsa_volume_offline(struct ctlr_info *h,
3848 unsigned char scsi3addr[])
3849{
3850 struct CommandList *c;
3851 unsigned char *sense;
3852 u8 sense_key, asc, ascq;
3853 int sense_len;
3854 int rc, ldstat = 0;
3855 u16 cmd_status;
3856 u8 scsi_status;
3857#define ASC_LUN_NOT_READY 0x04
3858#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3859#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3860
3861 c = cmd_alloc(h);
3862
3863 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3864 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3865 NO_TIMEOUT);
3866 if (rc) {
3867 cmd_free(h, c);
3868 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3869 }
3870 sense = c->err_info->SenseInfo;
3871 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3872 sense_len = sizeof(c->err_info->SenseInfo);
3873 else
3874 sense_len = c->err_info->SenseLen;
3875 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3876 cmd_status = c->err_info->CommandStatus;
3877 scsi_status = c->err_info->ScsiStatus;
3878 cmd_free(h, c);
3879
3880 /* Determine the reason for not ready state */
3881 ldstat = hpsa_get_volume_status(h, scsi3addr);
3882
3883 /* Keep volume offline in certain cases: */
3884 switch (ldstat) {
3885 case HPSA_LV_FAILED:
3886 case HPSA_LV_UNDERGOING_ERASE:
3887 case HPSA_LV_NOT_AVAILABLE:
3888 case HPSA_LV_UNDERGOING_RPI:
3889 case HPSA_LV_PENDING_RPI:
3890 case HPSA_LV_ENCRYPTED_NO_KEY:
3891 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3892 case HPSA_LV_UNDERGOING_ENCRYPTION:
3893 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3894 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3895 return ldstat;
3896 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3897 /* If VPD status page isn't available,
3898 * use ASC/ASCQ to determine state
3899 */
3900 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3901 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3902 return ldstat;
3903 break;
3904 default:
3905 break;
3906 }
3907 return HPSA_LV_OK;
3908}
3909
3910static int hpsa_update_device_info(struct ctlr_info *h,
3911 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3912 unsigned char *is_OBDR_device)
3913{
3914
3915#define OBDR_SIG_OFFSET 43
3916#define OBDR_TAPE_SIG "$DR-10"
3917#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3918#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3919
3920 unsigned char *inq_buff;
3921 unsigned char *obdr_sig;
3922 int rc = 0;
3923
3924 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3925 if (!inq_buff) {
3926 rc = -ENOMEM;
3927 goto bail_out;
3928 }
3929
3930 /* Do an inquiry to the device to see what it is. */
3931 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3932 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3933 dev_err(&h->pdev->dev,
3934 "%s: inquiry failed, device will be skipped.\n",
3935 __func__);
3936 rc = HPSA_INQUIRY_FAILED;
3937 goto bail_out;
3938 }
3939
3940 scsi_sanitize_inquiry_string(&inq_buff[8], 8);
3941 scsi_sanitize_inquiry_string(&inq_buff[16], 16);
3942
3943 this_device->devtype = (inq_buff[0] & 0x1f);
3944 memcpy(this_device->scsi3addr, scsi3addr, 8);
3945 memcpy(this_device->vendor, &inq_buff[8],
3946 sizeof(this_device->vendor));
3947 memcpy(this_device->model, &inq_buff[16],
3948 sizeof(this_device->model));
3949 this_device->rev = inq_buff[2];
3950 memset(this_device->device_id, 0,
3951 sizeof(this_device->device_id));
3952 if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3953 sizeof(this_device->device_id)) < 0) {
3954 dev_err(&h->pdev->dev,
3955 "hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n",
3956 h->ctlr, __func__,
3957 h->scsi_host->host_no,
3958 this_device->bus, this_device->target,
3959 this_device->lun,
3960 scsi_device_type(this_device->devtype),
3961 this_device->model);
3962 rc = HPSA_LV_FAILED;
3963 goto bail_out;
3964 }
3965
3966 if ((this_device->devtype == TYPE_DISK ||
3967 this_device->devtype == TYPE_ZBC) &&
3968 is_logical_dev_addr_mode(scsi3addr)) {
3969 unsigned char volume_offline;
3970
3971 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3972 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3973 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3974 volume_offline = hpsa_volume_offline(h, scsi3addr);
3975 if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
3976 h->legacy_board) {
3977 /*
3978 * Legacy boards might not support volume status
3979 */
3980 dev_info(&h->pdev->dev,
3981 "C0:T%d:L%d Volume status not available, assuming online.\n",
3982 this_device->target, this_device->lun);
3983 volume_offline = 0;
3984 }
3985 this_device->volume_offline = volume_offline;
3986 if (volume_offline == HPSA_LV_FAILED) {
3987 rc = HPSA_LV_FAILED;
3988 dev_err(&h->pdev->dev,
3989 "%s: LV failed, device will be skipped.\n",
3990 __func__);
3991 goto bail_out;
3992 }
3993 } else {
3994 this_device->raid_level = RAID_UNKNOWN;
3995 this_device->offload_config = 0;
3996 this_device->offload_enabled = 0;
3997 this_device->offload_to_be_enabled = 0;
3998 this_device->hba_ioaccel_enabled = 0;
3999 this_device->volume_offline = 0;
4000 this_device->queue_depth = h->nr_cmds;
4001 }
4002
4003 if (this_device->external)
4004 this_device->queue_depth = EXTERNAL_QD;
4005
4006 if (is_OBDR_device) {
4007 /* See if this is a One-Button-Disaster-Recovery device
4008 * by looking for "$DR-10" at offset 43 in inquiry data.
4009 */
4010 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
4011 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
4012 strncmp(obdr_sig, OBDR_TAPE_SIG,
4013 OBDR_SIG_LEN) == 0);
4014 }
4015 kfree(inq_buff);
4016 return 0;
4017
4018bail_out:
4019 kfree(inq_buff);
4020 return rc;
4021}
4022
4023/*
4024 * Helper function to assign bus, target, lun mapping of devices.
4025 * Logical drive target and lun are assigned at this time, but
4026 * physical device lun and target assignment are deferred (assigned
4027 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
4028*/
4029static void figure_bus_target_lun(struct ctlr_info *h,
4030 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
4031{
4032 u32 lunid = get_unaligned_le32(lunaddrbytes);
4033
4034 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
4035 /* physical device, target and lun filled in later */
4036 if (is_hba_lunid(lunaddrbytes)) {
4037 int bus = HPSA_HBA_BUS;
4038
4039 if (!device->rev)
4040 bus = HPSA_LEGACY_HBA_BUS;
4041 hpsa_set_bus_target_lun(device,
4042 bus, 0, lunid & 0x3fff);
4043 } else
4044 /* defer target, lun assignment for physical devices */
4045 hpsa_set_bus_target_lun(device,
4046 HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
4047 return;
4048 }
4049 /* It's a logical device */
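	/*
	 * External volumes encode the target in bits 16-29 of the LUN ID
	 * and the LUN in its low byte.
	 */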
4050 if (device->external) {
4051 hpsa_set_bus_target_lun(device,
4052 HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
4053 lunid & 0x00ff);
4054 return;
4055 }
4056 hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
4057 0, lunid & 0x3fff);
4058}
4059
4060static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
4061 int i, int nphysicals, int nlocal_logicals)
4062{
4063 /* In report logicals, local logicals are listed first,
4064 * then any externals.
4065 */
4066 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4067
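	/*
	 * When the controller itself occupies index 0, every other entry
	 * shifts up by one; the (raid_ctlr_position == 0) terms in this
	 * function account for that.
	 */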
4068 if (i == raid_ctlr_position)
4069 return 0;
4070
4071 if (i < logicals_start)
4072 return 0;
4073
4074 /* i is in logicals range, but still within local logicals */
4075 if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
4076 return 0;
4077
4078 return 1; /* it's an external lun */
4079}
4080
4081/*
4082 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
4083 * logdev. The number of luns in physdev and logdev are returned in
4084 * *nphysicals and *nlogicals, respectively.
4085 * Returns 0 on success, -1 otherwise.
4086 */
4087static int hpsa_gather_lun_info(struct ctlr_info *h,
4088 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
4089 struct ReportLUNdata *logdev, u32 *nlogicals)
4090{
4091 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
4092 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
4093 return -1;
4094 }
4095 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
4096 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
4097 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
4098 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
4099 *nphysicals = HPSA_MAX_PHYS_LUN;
4100 }
4101 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
4102 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
4103 return -1;
4104 }
4105 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
4106 /* Reject Logicals in excess of our max capability. */
4107 if (*nlogicals > HPSA_MAX_LUN) {
4108 dev_warn(&h->pdev->dev,
4109 "maximum logical LUNs (%d) exceeded. "
4110 "%d LUNs ignored.\n", HPSA_MAX_LUN,
4111 *nlogicals - HPSA_MAX_LUN);
4112 *nlogicals = HPSA_MAX_LUN;
4113 }
4114 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
4115 dev_warn(&h->pdev->dev,
4116 "maximum logical + physical LUNs (%d) exceeded. "
4117 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
4118 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
4119 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
4120 }
4121 return 0;
4122}
4123
4124static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
4125 int i, int nphysicals, int nlogicals,
4126 struct ReportExtendedLUNdata *physdev_list,
4127 struct ReportLUNdata *logdev_list)
4128{
4129 	/* Helper function: figure out where the LUN ID info is coming from,
4130 	 * given index i, the lists of physical and logical devices, and where
4131 	 * in the list the raid controller is supposed to appear (first or last).
4132 	 */
4133
4134 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4135 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
4136
4137 if (i == raid_ctlr_position)
4138 return RAID_CTLR_LUNID;
4139
4140 if (i < logicals_start)
4141 return &physdev_list->LUN[i -
4142 (raid_ctlr_position == 0)].lunid[0];
4143
4144 if (i < last_device)
4145 return &logdev_list->LUN[i - nphysicals -
4146 (raid_ctlr_position == 0)][0];
4147 BUG();
4148 return NULL;
4149}
4150
4151/* get physical drive ioaccel handle and queue depth */
4152static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
4153 struct hpsa_scsi_dev_t *dev,
4154 struct ReportExtendedLUNdata *rlep, int rle_index,
4155 struct bmic_identify_physical_device *id_phys)
4156{
4157 int rc;
4158 struct ext_report_lun_entry *rle;
4159
4160 rle = &rlep->LUN[rle_index];
4161
4162 dev->ioaccel_handle = rle->ioaccel_handle;
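	/*
	 * Bit 3 (0x08) of device_flags, together with a valid ioaccel
	 * handle, enables HBA-mode ioaccel for this drive.
	 */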
4163 if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
4164 dev->hba_ioaccel_enabled = 1;
4165 memset(id_phys, 0, sizeof(*id_phys));
4166 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
4167 GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
4168 sizeof(*id_phys));
4169 	/* Reserve space for FW operations */
4170#define DRIVE_CMDS_RESERVED_FOR_FW 2
4171#define DRIVE_QUEUE_DEPTH 7
4172 	if (!rc)
4173 		dev->queue_depth =
4174 			le16_to_cpu(id_phys->current_queue_depth_limit) -
4175 			DRIVE_CMDS_RESERVED_FOR_FW;
4176 	else
4177 		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
4178}
4179
4180static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
4181 struct ReportExtendedLUNdata *rlep, int rle_index,
4182 struct bmic_identify_physical_device *id_phys)
4183{
4184 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
4185
4186 if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
4187 this_device->hba_ioaccel_enabled = 1;
4188
4189 memcpy(&this_device->active_path_index,
4190 &id_phys->active_path_number,
4191 sizeof(this_device->active_path_index));
4192 memcpy(&this_device->path_map,
4193 &id_phys->redundant_path_present_map,
4194 sizeof(this_device->path_map));
4195 memcpy(&this_device->box,
4196 &id_phys->alternate_paths_phys_box_on_port,
4197 sizeof(this_device->box));
4198 memcpy(&this_device->phys_connector,
4199 &id_phys->alternate_paths_phys_connector,
4200 sizeof(this_device->phys_connector));
4201 memcpy(&this_device->bay,
4202 &id_phys->phys_bay_in_box,
4203 sizeof(this_device->bay));
4204}
4205
4206/* get number of local logical disks. */
4207static int hpsa_set_local_logical_count(struct ctlr_info *h,
4208 struct bmic_identify_controller *id_ctlr,
4209 u32 *nlocals)
4210{
4211 int rc;
4212
4213 if (!id_ctlr) {
4214 dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
4215 __func__);
4216 return -ENOMEM;
4217 }
4218 memset(id_ctlr, 0, sizeof(*id_ctlr));
4219 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
4220 	if (!rc) {
4221 if (id_ctlr->configured_logical_drive_count < 255)
4222 *nlocals = id_ctlr->configured_logical_drive_count;
4223 else
4224 *nlocals = le16_to_cpu(
4225 id_ctlr->extended_logical_unit_count);
4226 	} else
4227 *nlocals = -1;
4228 return rc;
4229}
4230
4231static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
4232{
4233 struct bmic_identify_physical_device *id_phys;
4234 bool is_spare = false;
4235 int rc;
4236
4237 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4238 if (!id_phys)
4239 return false;
4240
4241 rc = hpsa_bmic_id_physical_device(h,
4242 lunaddrbytes,
4243 GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
4244 id_phys, sizeof(*id_phys));
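	/* Bit 6 of more_flags marks the drive as a spare. */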
4245 if (rc == 0)
4246 is_spare = (id_phys->more_flags >> 6) & 0x01;
4247
4248 kfree(id_phys);
4249 return is_spare;
4250}
4251
4252#define RPL_DEV_FLAG_NON_DISK 0x1
4253#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2
4254#define RPL_DEV_FLAG_UNCONFIG_DISK 0x4
4255
4256#define BMIC_DEVICE_TYPE_ENCLOSURE 6
4257
4258static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
4259 struct ext_report_lun_entry *rle)
4260{
4261 u8 device_flags;
4262 u8 device_type;
4263
4264 if (!MASKED_DEVICE(lunaddrbytes))
4265 return false;
4266
4267 device_flags = rle->device_flags;
4268 device_type = rle->device_type;
4269
4270 if (device_flags & RPL_DEV_FLAG_NON_DISK) {
4271 if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
4272 return false;
4273 return true;
4274 }
4275
4276 if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
4277 return false;
4278
4279 if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
4280 return false;
4281
4282 /*
4283 	 * Spares may be spun down; we do not want to
4284 	 * do an Inquiry to a RAID set spare drive, as
4285 	 * that would have it spun up. That is a
4286 	 * performance hit because I/O to the RAID device
4287 	 * stops while the spin up occurs, which can take
4288 * over 50 seconds.
4289 */
4290 if (hpsa_is_disk_spare(h, lunaddrbytes))
4291 return true;
4292
4293 return false;
4294}
4295
4296static void hpsa_update_scsi_devices(struct ctlr_info *h)
4297{
4298 /* the idea here is we could get notified
4299 * that some devices have changed, so we do a report
4300 * physical luns and report logical luns cmd, and adjust
4301 * our list of devices accordingly.
4302 *
4303 * The scsi3addr's of devices won't change so long as the
4304 * adapter is not reset. That means we can rescan and
4305 * tell which devices we already know about, vs. new
4306 * devices, vs. disappearing devices.
4307 */
4308 struct ReportExtendedLUNdata *physdev_list = NULL;
4309 struct ReportLUNdata *logdev_list = NULL;
4310 struct bmic_identify_physical_device *id_phys = NULL;
4311 struct bmic_identify_controller *id_ctlr = NULL;
4312 u32 nphysicals = 0;
4313 u32 nlogicals = 0;
4314 u32 nlocal_logicals = 0;
4315 u32 ndev_allocated = 0;
4316 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
4317 int ncurrent = 0;
4318 int i, n_ext_target_devs, ndevs_to_allocate;
4319 int raid_ctlr_position;
4320 bool physical_device;
4321 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
4322
4323 currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
4324 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
4325 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
4326 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
4327 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4328 id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
4329
4330 if (!currentsd || !physdev_list || !logdev_list ||
4331 !tmpdevice || !id_phys || !id_ctlr) {
4332 dev_err(&h->pdev->dev, "out of memory\n");
4333 goto out;
4334 }
4335 memset(lunzerobits, 0, sizeof(lunzerobits));
4336
4337 h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
4338
4339 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
4340 logdev_list, &nlogicals)) {
4341 h->drv_req_rescan = 1;
4342 goto out;
4343 }
4344
4345 /* Set number of local logicals (non PTRAID) */
4346 if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
4347 dev_warn(&h->pdev->dev,
4348 "%s: Can't determine number of local logical devices.\n",
4349 __func__);
4350 }
4351
4352 /* We might see up to the maximum number of logical and physical disks
4353 * plus external target devices, and a device for the local RAID
4354 * controller.
4355 */
4356 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
4357
4358 hpsa_ext_ctrl_present(h, physdev_list);
4359
4360 /* Allocate the per device structures */
4361 for (i = 0; i < ndevs_to_allocate; i++) {
4362 if (i >= HPSA_MAX_DEVICES) {
4363 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
4364 " %d devices ignored.\n", HPSA_MAX_DEVICES,
4365 ndevs_to_allocate - HPSA_MAX_DEVICES);
4366 break;
4367 }
4368
4369 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
4370 if (!currentsd[i]) {
4371 h->drv_req_rescan = 1;
4372 goto out;
4373 }
4374 ndev_allocated++;
4375 }
4376
4377 if (is_scsi_rev_5(h))
4378 raid_ctlr_position = 0;
4379 else
4380 raid_ctlr_position = nphysicals + nlogicals;
4381
4382 /* adjust our table of devices */
4383 n_ext_target_devs = 0;
4384 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
4385 u8 *lunaddrbytes, is_OBDR = 0;
4386 int rc = 0;
4387 int phys_dev_index = i - (raid_ctlr_position == 0);
4388 bool skip_device = false;
4389
4390 memset(tmpdevice, 0, sizeof(*tmpdevice));
4391
4392 physical_device = i < nphysicals + (raid_ctlr_position == 0);
4393
4394 /* Figure out where the LUN ID info is coming from */
4395 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
4396 i, nphysicals, nlogicals, physdev_list, logdev_list);
4397
4398 /* Determine if this is a lun from an external target array */
4399 tmpdevice->external =
4400 figure_external_status(h, raid_ctlr_position, i,
4401 nphysicals, nlocal_logicals);
4402
4403 /*
4404 * Skip over some devices such as a spare.
4405 */
4406 if (!tmpdevice->external && physical_device) {
4407 skip_device = hpsa_skip_device(h, lunaddrbytes,
4408 &physdev_list->LUN[phys_dev_index]);
4409 if (skip_device)
4410 continue;
4411 }
4412
4413 /* Get device type, vendor, model, device id, raid_map */
4414 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
4415 &is_OBDR);
4416 if (rc == -ENOMEM) {
4417 dev_warn(&h->pdev->dev,
4418 "Out of memory, rescan deferred.\n");
4419 h->drv_req_rescan = 1;
4420 goto out;
4421 }
4422 if (rc) {
4423 h->drv_req_rescan = 1;
4424 continue;
4425 }
4426
4427 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4428 this_device = currentsd[ncurrent];
4429
4430 *this_device = *tmpdevice;
4431 this_device->physical_device = physical_device;
4432
4433 /*
4434 * Expose all devices except for physical devices that
4435 * are masked.
4436 */
4437 if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
4438 this_device->expose_device = 0;
4439 else
4440 this_device->expose_device = 1;
4441
4442
4443 /*
4444 * Get the SAS address for physical devices that are exposed.
4445 */
4446 if (this_device->physical_device && this_device->expose_device)
4447 hpsa_get_sas_address(h, lunaddrbytes, this_device);
4448
4449 switch (this_device->devtype) {
4450 case TYPE_ROM:
4451 /* We don't *really* support actual CD-ROM devices,
4452 * just "One Button Disaster Recovery" tape drive
4453 * which temporarily pretends to be a CD-ROM drive.
4454 * So we check that the device is really an OBDR tape
4455 * device by checking for "$DR-10" in bytes 43-48 of
4456 * the inquiry data.
4457 */
4458 if (is_OBDR)
4459 ncurrent++;
4460 break;
4461 case TYPE_DISK:
4462 case TYPE_ZBC:
4463 if (this_device->physical_device) {
4464 /* The disk is in HBA mode. */
4465 /* Never use RAID mapper in HBA mode. */
4466 this_device->offload_enabled = 0;
4467 hpsa_get_ioaccel_drive_info(h, this_device,
4468 physdev_list, phys_dev_index, id_phys);
4469 hpsa_get_path_info(this_device,
4470 physdev_list, phys_dev_index, id_phys);
4471 }
4472 ncurrent++;
4473 break;
4474 case TYPE_TAPE:
4475 case TYPE_MEDIUM_CHANGER:
4476 ncurrent++;
4477 break;
4478 case TYPE_ENCLOSURE:
4479 if (!this_device->external)
4480 hpsa_get_enclosure_info(h, lunaddrbytes,
4481 physdev_list, phys_dev_index,
4482 this_device);
4483 ncurrent++;
4484 break;
4485 case TYPE_RAID:
4486 /* Only present the Smartarray HBA as a RAID controller.
4487 * If it's a RAID controller other than the HBA itself
4488 * (an external RAID controller, MSA500 or similar)
4489 * don't present it.
4490 */
4491 if (!is_hba_lunid(lunaddrbytes))
4492 break;
4493 ncurrent++;
4494 break;
4495 default:
4496 break;
4497 }
4498 if (ncurrent >= HPSA_MAX_DEVICES)
4499 break;
4500 }
4501
4502 if (h->sas_host == NULL) {
4503 int rc = 0;
4504
4505 rc = hpsa_add_sas_host(h);
4506 if (rc) {
4507 dev_warn(&h->pdev->dev,
4508 "Could not add sas host %d\n", rc);
4509 goto out;
4510 }
4511 }
4512
4513 adjust_hpsa_scsi_table(h, currentsd, ncurrent);
4514out:
4515 kfree(tmpdevice);
4516 for (i = 0; i < ndev_allocated; i++)
4517 kfree(currentsd[i]);
4518 kfree(currentsd);
4519 kfree(physdev_list);
4520 kfree(logdev_list);
4521 kfree(id_ctlr);
4522 kfree(id_phys);
4523}
4524
4525static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
4526 struct scatterlist *sg)
4527{
4528 u64 addr64 = (u64) sg_dma_address(sg);
4529 unsigned int len = sg_dma_len(sg);
4530
4531 desc->Addr = cpu_to_le64(addr64);
4532 desc->Len = cpu_to_le32(len);
4533 desc->Ext = 0;
4534}
4535
4536/*
4537 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
4538 * dma mapping and fills in the scatter gather entries of the
4539 * hpsa command, cp.
4540 */
4541static int hpsa_scatter_gather(struct ctlr_info *h,
4542 struct CommandList *cp,
4543 struct scsi_cmnd *cmd)
4544{
4545 struct scatterlist *sg;
4546 int use_sg, i, sg_limit, chained, last_sg;
4547 struct SGDescriptor *curr_sg;
4548
4549 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4550
4551 use_sg = scsi_dma_map(cmd);
4552 if (use_sg < 0)
4553 return use_sg;
4554
4555 if (!use_sg)
4556 goto sglist_finished;
4557
4558 /*
4559 * If the number of entries is greater than the max for a single list,
4560 * then we have a chained list; we will set up all but one entry in the
4561 * first list (the last entry is saved for link information);
4562 	 * otherwise, we don't have a chained list and we'll set up each of
4563 	 * the entries in the one list.
4564 */
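	/*
	 * For example (hypothetical numbers): with max_cmd_sg_entries = 32
	 * and use_sg = 40, the first 31 descriptors are set up inline and
	 * the remaining 9 go into the chain block handled below.
	 */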
4565 curr_sg = cp->SG;
4566 chained = use_sg > h->max_cmd_sg_entries;
4567 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4568 last_sg = scsi_sg_count(cmd) - 1;
4569 scsi_for_each_sg(cmd, sg, sg_limit, i) {
4570 hpsa_set_sg_descriptor(curr_sg, sg);
4571 curr_sg++;
4572 }
4573
4574 if (chained) {
4575 /*
4576 * Continue with the chained list. Set curr_sg to the chained
4577 * list. Modify the limit to the total count less the entries
4578 * we've already set up. Resume the scan at the list entry
4579 * where the previous loop left off.
4580 */
4581 curr_sg = h->cmd_sg_list[cp->cmdindex];
4582 sg_limit = use_sg - sg_limit;
4583 for_each_sg(sg, sg, sg_limit, i) {
4584 hpsa_set_sg_descriptor(curr_sg, sg);
4585 curr_sg++;
4586 }
4587 }
4588
4589 /* Back the pointer up to the last entry and mark it as "last". */
4590 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4591
4592 if (use_sg + chained > h->maxSG)
4593 h->maxSG = use_sg + chained;
4594
4595 if (chained) {
4596 cp->Header.SGList = h->max_cmd_sg_entries;
4597 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4598 if (hpsa_map_sg_chain_block(h, cp)) {
4599 scsi_dma_unmap(cmd);
4600 return -1;
4601 }
4602 return 0;
4603 }
4604
4605sglist_finished:
4606
4607 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
4608 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
4609 return 0;
4610}
4611
4612static inline void warn_zero_length_transfer(struct ctlr_info *h,
4613 u8 *cdb, int cdb_len,
4614 const char *func)
4615{
4616 dev_warn(&h->pdev->dev,
4617 "%s: Blocking zero-length request: CDB:%*phN\n",
4618 func, cdb_len, cdb);
4619}
4620
4621#define IO_ACCEL_INELIGIBLE 1
4622/* zero-length transfers trigger hardware errors. */
4623static bool is_zero_length_transfer(u8 *cdb)
4624{
4625 u32 block_cnt;
4626
4627 /* Block zero-length transfer sizes on certain commands. */
4628 switch (cdb[0]) {
4629 case READ_10:
4630 case WRITE_10:
4631 case VERIFY: /* 0x2F */
4632 case WRITE_VERIFY: /* 0x2E */
4633 block_cnt = get_unaligned_be16(&cdb[7]);
4634 break;
4635 case READ_12:
4636 case WRITE_12:
4637 case VERIFY_12: /* 0xAF */
4638 case WRITE_VERIFY_12: /* 0xAE */
4639 block_cnt = get_unaligned_be32(&cdb[6]);
4640 break;
4641 case READ_16:
4642 case WRITE_16:
4643 case VERIFY_16: /* 0x8F */
4644 block_cnt = get_unaligned_be32(&cdb[10]);
4645 break;
4646 default:
4647 return false;
4648 }
4649
4650 return block_cnt == 0;
4651}
4652
4653static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4654{
4655 int is_write = 0;
4656 u32 block;
4657 u32 block_cnt;
4658
4659 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
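	/*
	 * 6- and 12-byte variants are rewritten as 10-byte CDBs here; a
	 * transfer count above 0xffff cannot be encoded in 10 bytes, so
	 * such requests are left to the normal (non-accelerated) path.
	 */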
4660 switch (cdb[0]) {
4661 case WRITE_6:
4662 case WRITE_12:
4663 is_write = 1;
4664 /* fall through */
4665 case READ_6:
4666 case READ_12:
4667 if (*cdb_len == 6) {
4668 block = (((cdb[1] & 0x1F) << 16) |
4669 (cdb[2] << 8) |
4670 cdb[3]);
4671 block_cnt = cdb[4];
4672 if (block_cnt == 0)
4673 block_cnt = 256;
4674 } else {
4675 BUG_ON(*cdb_len != 12);
4676 block = get_unaligned_be32(&cdb[2]);
4677 block_cnt = get_unaligned_be32(&cdb[6]);
4678 }
4679 if (block_cnt > 0xffff)
4680 return IO_ACCEL_INELIGIBLE;
4681
4682 cdb[0] = is_write ? WRITE_10 : READ_10;
4683 cdb[1] = 0;
4684 cdb[2] = (u8) (block >> 24);
4685 cdb[3] = (u8) (block >> 16);
4686 cdb[4] = (u8) (block >> 8);
4687 cdb[5] = (u8) (block);
4688 cdb[6] = 0;
4689 cdb[7] = (u8) (block_cnt >> 8);
4690 cdb[8] = (u8) (block_cnt);
4691 cdb[9] = 0;
4692 *cdb_len = 10;
4693 break;
4694 }
4695 return 0;
4696}
4697
4698static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4699 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4700 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4701{
4702 struct scsi_cmnd *cmd = c->scsi_cmd;
4703 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4704 unsigned int len;
4705 unsigned int total_len = 0;
4706 struct scatterlist *sg;
4707 u64 addr64;
4708 int use_sg, i;
4709 struct SGDescriptor *curr_sg;
4710 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4711
4712 /* TODO: implement chaining support */
4713 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4714 atomic_dec(&phys_disk->ioaccel_cmds_out);
4715 return IO_ACCEL_INELIGIBLE;
4716 }
4717
4718 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4719
4720 if (is_zero_length_transfer(cdb)) {
4721 warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4722 atomic_dec(&phys_disk->ioaccel_cmds_out);
4723 return IO_ACCEL_INELIGIBLE;
4724 }
4725
4726 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4727 atomic_dec(&phys_disk->ioaccel_cmds_out);
4728 return IO_ACCEL_INELIGIBLE;
4729 }
4730
4731 c->cmd_type = CMD_IOACCEL1;
4732
4733 /* Adjust the DMA address to point to the accelerated command buffer */
4734 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4735 (c->cmdindex * sizeof(*cp));
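	/*
	 * ioaccel command buffers are expected to be 128-byte aligned,
	 * hence the check on the low 7 address bits.
	 */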
4736 BUG_ON(c->busaddr & 0x0000007F);
4737
4738 use_sg = scsi_dma_map(cmd);
4739 if (use_sg < 0) {
4740 atomic_dec(&phys_disk->ioaccel_cmds_out);
4741 return use_sg;
4742 }
4743
4744 if (use_sg) {
4745 curr_sg = cp->SG;
4746 scsi_for_each_sg(cmd, sg, use_sg, i) {
4747 addr64 = (u64) sg_dma_address(sg);
4748 len = sg_dma_len(sg);
4749 total_len += len;
4750 curr_sg->Addr = cpu_to_le64(addr64);
4751 curr_sg->Len = cpu_to_le32(len);
4752 curr_sg->Ext = cpu_to_le32(0);
4753 curr_sg++;
4754 }
4755 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4756
4757 switch (cmd->sc_data_direction) {
4758 case DMA_TO_DEVICE:
4759 control |= IOACCEL1_CONTROL_DATA_OUT;
4760 break;
4761 case DMA_FROM_DEVICE:
4762 control |= IOACCEL1_CONTROL_DATA_IN;
4763 break;
4764 case DMA_NONE:
4765 control |= IOACCEL1_CONTROL_NODATAXFER;
4766 break;
4767 default:
4768 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4769 cmd->sc_data_direction);
4770 BUG();
4771 break;
4772 }
4773 } else {
4774 control |= IOACCEL1_CONTROL_NODATAXFER;
4775 }
4776
4777 c->Header.SGList = use_sg;
4778 /* Fill out the command structure to submit */
4779 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4780 cp->transfer_len = cpu_to_le32(total_len);
4781 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4782 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4783 cp->control = cpu_to_le32(control);
4784 memcpy(cp->CDB, cdb, cdb_len);
4785 memcpy(cp->CISS_LUN, scsi3addr, 8);
4786 /* Tag was already set at init time. */
4787 enqueue_cmd_and_start_io(h, c);
4788 return 0;
4789}
4790
4791/*
4792 * Queue a command directly to a device behind the controller using the
4793 * I/O accelerator path.
4794 */
4795static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4796 struct CommandList *c)
4797{
4798 struct scsi_cmnd *cmd = c->scsi_cmd;
4799 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4800
4801 if (!dev)
4802 return -1;
4803
4804 c->phys_disk = dev;
4805
4806 if (dev->in_reset)
4807 return -1;
4808
4809 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4810 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4811}
4812
4813/*
4814 * Set encryption parameters for the ioaccel2 request
4815 */
4816static void set_encrypt_ioaccel2(struct ctlr_info *h,
4817 struct CommandList *c, struct io_accel2_cmd *cp)
4818{
4819 struct scsi_cmnd *cmd = c->scsi_cmd;
4820 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4821 struct raid_map_data *map = &dev->raid_map;
4822 u64 first_block;
4823
4824 /* Are we doing encryption on this device */
4825 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4826 return;
4827 /* Set the data encryption key index. */
4828 cp->dekindex = map->dekindex;
4829
4830 /* Set the encryption enable flag, encoded into direction field. */
4831 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4832
4833 /* Set encryption tweak values based on logical block address
4834 * If block size is 512, tweak value is LBA.
4835 	 * For other block sizes, tweak is (LBA * block size) / 512.
4836 */
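	/*
	 * For example (hypothetical): a volume with 4096-byte blocks at
	 * LBA 10 gets tweak (10 * 4096) / 512 = 80.
	 */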
4837 switch (cmd->cmnd[0]) {
4838 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
4839 case READ_6:
4840 case WRITE_6:
4841 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
4842 (cmd->cmnd[2] << 8) |
4843 cmd->cmnd[3]);
4844 break;
4845 case WRITE_10:
4846 case READ_10:
4847 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
4848 case WRITE_12:
4849 case READ_12:
4850 first_block = get_unaligned_be32(&cmd->cmnd[2]);
4851 break;
4852 case WRITE_16:
4853 case READ_16:
4854 first_block = get_unaligned_be64(&cmd->cmnd[2]);
4855 break;
4856 default:
4857 dev_err(&h->pdev->dev,
4858 "ERROR: %s: size (0x%x) not supported for encryption\n",
4859 __func__, cmd->cmnd[0]);
4860 BUG();
4861 break;
4862 }
4863
4864 if (le32_to_cpu(map->volume_blk_size) != 512)
4865 first_block = first_block *
4866 le32_to_cpu(map->volume_blk_size)/512;
4867
4868 cp->tweak_lower = cpu_to_le32(first_block);
4869 cp->tweak_upper = cpu_to_le32(first_block >> 32);
4870}
4871
4872static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4873 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4874 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4875{
4876 struct scsi_cmnd *cmd = c->scsi_cmd;
4877 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4878 struct ioaccel2_sg_element *curr_sg;
4879 int use_sg, i;
4880 struct scatterlist *sg;
4881 u64 addr64;
4882 u32 len;
4883 u32 total_len = 0;
4884
4885 if (!cmd->device)
4886 return -1;
4887
4888 if (!cmd->device->hostdata)
4889 return -1;
4890
4891 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4892
4893 if (is_zero_length_transfer(cdb)) {
4894 warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4895 atomic_dec(&phys_disk->ioaccel_cmds_out);
4896 return IO_ACCEL_INELIGIBLE;
4897 }
4898
4899 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4900 atomic_dec(&phys_disk->ioaccel_cmds_out);
4901 return IO_ACCEL_INELIGIBLE;
4902 }
4903
4904 c->cmd_type = CMD_IOACCEL2;
4905 /* Adjust the DMA address to point to the accelerated command buffer */
4906 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4907 (c->cmdindex * sizeof(*cp));
4908 BUG_ON(c->busaddr & 0x0000007F);
4909
4910 memset(cp, 0, sizeof(*cp));
4911 cp->IU_type = IOACCEL2_IU_TYPE;
4912
4913 use_sg = scsi_dma_map(cmd);
4914 if (use_sg < 0) {
4915 atomic_dec(&phys_disk->ioaccel_cmds_out);
4916 return use_sg;
4917 }
4918
4919 if (use_sg) {
4920 curr_sg = cp->sg;
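		/*
		 * If the request needs more SG entries than fit in the
		 * command itself, the first element instead points at a
		 * separate chain block that holds the full list.
		 */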
4921 if (use_sg > h->ioaccel_maxsg) {
4922 addr64 = le64_to_cpu(
4923 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4924 curr_sg->address = cpu_to_le64(addr64);
4925 curr_sg->length = 0;
4926 curr_sg->reserved[0] = 0;
4927 curr_sg->reserved[1] = 0;
4928 curr_sg->reserved[2] = 0;
4929 curr_sg->chain_indicator = IOACCEL2_CHAIN;
4930
4931 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4932 }
4933 scsi_for_each_sg(cmd, sg, use_sg, i) {
4934 addr64 = (u64) sg_dma_address(sg);
4935 len = sg_dma_len(sg);
4936 total_len += len;
4937 curr_sg->address = cpu_to_le64(addr64);
4938 curr_sg->length = cpu_to_le32(len);
4939 curr_sg->reserved[0] = 0;
4940 curr_sg->reserved[1] = 0;
4941 curr_sg->reserved[2] = 0;
4942 curr_sg->chain_indicator = 0;
4943 curr_sg++;
4944 }
4945
4946 /*
4947 * Set the last s/g element bit
4948 */
4949 (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;
4950
4951 switch (cmd->sc_data_direction) {
4952 case DMA_TO_DEVICE:
4953 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4954 cp->direction |= IOACCEL2_DIR_DATA_OUT;
4955 break;
4956 case DMA_FROM_DEVICE:
4957 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4958 cp->direction |= IOACCEL2_DIR_DATA_IN;
4959 break;
4960 case DMA_NONE:
4961 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4962 cp->direction |= IOACCEL2_DIR_NO_DATA;
4963 break;
4964 default:
4965 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4966 cmd->sc_data_direction);
4967 BUG();
4968 break;
4969 }
4970 } else {
4971 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4972 cp->direction |= IOACCEL2_DIR_NO_DATA;
4973 }
4974
4975 /* Set encryption parameters, if necessary */
4976 set_encrypt_ioaccel2(h, c, cp);
4977
4978 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
4979 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
4980 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
4981
4982 cp->data_len = cpu_to_le32(total_len);
4983 cp->err_ptr = cpu_to_le64(c->busaddr +
4984 offsetof(struct io_accel2_cmd, error_data));
4985 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4986
4987 /* fill in sg elements */
4988 if (use_sg > h->ioaccel_maxsg) {
4989 cp->sg_count = 1;
4990 cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
4991 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4992 atomic_dec(&phys_disk->ioaccel_cmds_out);
4993 scsi_dma_unmap(cmd);
4994 return -1;
4995 }
4996 } else
4997 cp->sg_count = (u8) use_sg;
4998
4999 if (phys_disk->in_reset) {
5000 cmd->result = DID_RESET << 16;
5001 return -1;
5002 }
5003
5004 enqueue_cmd_and_start_io(h, c);
5005 return 0;
5006}
5007
5008/*
5009 * Queue a command to the correct I/O accelerator path.
5010 */
5011static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
5012 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
5013 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
5014{
5015 if (!c->scsi_cmd->device)
5016 return -1;
5017
5018 if (!c->scsi_cmd->device->hostdata)
5019 return -1;
5020
5021 if (phys_disk->in_reset)
5022 return -1;
5023
5024 /* Try to honor the device's queue depth */
5025 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
5026 phys_disk->queue_depth) {
5027 atomic_dec(&phys_disk->ioaccel_cmds_out);
5028 return IO_ACCEL_INELIGIBLE;
5029 }
5030 if (h->transMethod & CFGTBL_Trans_io_accel1)
5031 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
5032 cdb, cdb_len, scsi3addr,
5033 phys_disk);
5034 else
5035 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
5036 cdb, cdb_len, scsi3addr,
5037 phys_disk);
5038}
5039
5040static void raid_map_helper(struct raid_map_data *map,
5041 int offload_to_mirror, u32 *map_index, u32 *current_group)
5042{
5043 if (offload_to_mirror == 0) {
5044 /* use physical disk in the first mirrored group. */
5045 *map_index %= le16_to_cpu(map->data_disks_per_row);
5046 return;
5047 }
5048 do {
5049 /* determine mirror group that *map_index indicates */
5050 *current_group = *map_index /
5051 le16_to_cpu(map->data_disks_per_row);
5052 if (offload_to_mirror == *current_group)
5053 continue;
5054 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
5055 /* select map index from next group */
5056 *map_index += le16_to_cpu(map->data_disks_per_row);
5057 (*current_group)++;
5058 } else {
5059 /* select map index from first group */
5060 *map_index %= le16_to_cpu(map->data_disks_per_row);
5061 *current_group = 0;
5062 }
5063 } while (offload_to_mirror != *current_group);
5064}
5065
5066/*
5067 * Attempt to perform offload RAID mapping for a logical volume I/O.
5068 */
5069static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
5070 struct CommandList *c)
5071{
5072 struct scsi_cmnd *cmd = c->scsi_cmd;
5073 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5074 struct raid_map_data *map = &dev->raid_map;
5075 struct raid_map_disk_data *dd = &map->data[0];
5076 int is_write = 0;
5077 u32 map_index;
5078 u64 first_block, last_block;
5079 u32 block_cnt;
5080 u32 blocks_per_row;
5081 u64 first_row, last_row;
5082 u32 first_row_offset, last_row_offset;
5083 u32 first_column, last_column;
5084 u64 r0_first_row, r0_last_row;
5085 u32 r5or6_blocks_per_row;
5086 u64 r5or6_first_row, r5or6_last_row;
5087 u32 r5or6_first_row_offset, r5or6_last_row_offset;
5088 u32 r5or6_first_column, r5or6_last_column;
5089 u32 total_disks_per_row;
5090 u32 stripesize;
5091 u32 first_group, last_group, current_group;
5092 u32 map_row;
5093 u32 disk_handle;
5094 u64 disk_block;
5095 u32 disk_block_cnt;
5096 u8 cdb[16];
5097 u8 cdb_len;
5098 u16 strip_size;
5099#if BITS_PER_LONG == 32
5100 u64 tmpdiv;
5101#endif
5102 int offload_to_mirror;
5103
5104 if (!dev)
5105 return -1;
5106
5107 if (dev->in_reset)
5108 return -1;
5109
5110 /* check for valid opcode, get LBA and block count */
5111 switch (cmd->cmnd[0]) {
5112 case WRITE_6:
5113 is_write = 1;
5114 /* fall through */
5115 case READ_6:
5116 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
5117 (cmd->cmnd[2] << 8) |
5118 cmd->cmnd[3]);
5119 block_cnt = cmd->cmnd[4];
5120 if (block_cnt == 0)
5121 block_cnt = 256;
5122 break;
5123 case WRITE_10:
5124 is_write = 1;
5125 /* fall through */
5126 case READ_10:
5127 first_block =
5128 (((u64) cmd->cmnd[2]) << 24) |
5129 (((u64) cmd->cmnd[3]) << 16) |
5130 (((u64) cmd->cmnd[4]) << 8) |
5131 cmd->cmnd[5];
5132 block_cnt =
5133 (((u32) cmd->cmnd[7]) << 8) |
5134 cmd->cmnd[8];
5135 break;
5136 case WRITE_12:
5137 is_write = 1;
5138 /* fall through */
5139 case READ_12:
5140 first_block =
5141 (((u64) cmd->cmnd[2]) << 24) |
5142 (((u64) cmd->cmnd[3]) << 16) |
5143 (((u64) cmd->cmnd[4]) << 8) |
5144 cmd->cmnd[5];
5145 block_cnt =
5146 (((u32) cmd->cmnd[6]) << 24) |
5147 (((u32) cmd->cmnd[7]) << 16) |
5148 (((u32) cmd->cmnd[8]) << 8) |
5149 cmd->cmnd[9];
5150 break;
5151 case WRITE_16:
5152 is_write = 1;
5153 /* fall through */
5154 case READ_16:
5155 first_block =
5156 (((u64) cmd->cmnd[2]) << 56) |
5157 (((u64) cmd->cmnd[3]) << 48) |
5158 (((u64) cmd->cmnd[4]) << 40) |
5159 (((u64) cmd->cmnd[5]) << 32) |
5160 (((u64) cmd->cmnd[6]) << 24) |
5161 (((u64) cmd->cmnd[7]) << 16) |
5162 (((u64) cmd->cmnd[8]) << 8) |
5163 cmd->cmnd[9];
5164 block_cnt =
5165 (((u32) cmd->cmnd[10]) << 24) |
5166 (((u32) cmd->cmnd[11]) << 16) |
5167 (((u32) cmd->cmnd[12]) << 8) |
5168 cmd->cmnd[13];
5169 break;
5170 default:
5171 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
5172 }
5173 last_block = first_block + block_cnt - 1;
5174
5175 /* check for write to non-RAID-0 */
5176 if (is_write && dev->raid_level != 0)
5177 return IO_ACCEL_INELIGIBLE;
5178
5179 /* check for invalid block or wraparound */
5180 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
5181 last_block < first_block)
5182 return IO_ACCEL_INELIGIBLE;
5183
5184 /* calculate stripe information for the request */
5185 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
5186 le16_to_cpu(map->strip_size);
5187 strip_size = le16_to_cpu(map->strip_size);
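	/*
	 * Worked example (hypothetical numbers): with 4 data disks and a
	 * 128-block strip, blocks_per_row = 512; an I/O at LBA 1000 falls
	 * in row 1 (1000 / 512), at row offset 488, in column 3 (488 / 128).
	 */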
5188#if BITS_PER_LONG == 32
5189 tmpdiv = first_block;
5190 (void) do_div(tmpdiv, blocks_per_row);
5191 first_row = tmpdiv;
5192 tmpdiv = last_block;
5193 (void) do_div(tmpdiv, blocks_per_row);
5194 last_row = tmpdiv;
5195 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5196 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5197 tmpdiv = first_row_offset;
5198 (void) do_div(tmpdiv, strip_size);
5199 first_column = tmpdiv;
5200 tmpdiv = last_row_offset;
5201 (void) do_div(tmpdiv, strip_size);
5202 last_column = tmpdiv;
5203#else
5204 first_row = first_block / blocks_per_row;
5205 last_row = last_block / blocks_per_row;
5206 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5207 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5208 first_column = first_row_offset / strip_size;
5209 last_column = last_row_offset / strip_size;
5210#endif
5211
5212 /* if this isn't a single row/column then give to the controller */
5213 if ((first_row != last_row) || (first_column != last_column))
5214 return IO_ACCEL_INELIGIBLE;
5215
5216 /* proceeding with driver mapping */
5217 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
5218 le16_to_cpu(map->metadata_disks_per_row);
5219 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5220 le16_to_cpu(map->row_cnt);
5221 map_index = (map_row * total_disks_per_row) + first_column;
5222
5223 switch (dev->raid_level) {
5224 case HPSA_RAID_0:
5225 break; /* nothing special to do */
5226 case HPSA_RAID_1:
5227 /* Handles load balance across RAID 1 members.
5228 * (2-drive R1 and R10 with even # of drives.)
5229 * Appropriate for SSDs, not optimal for HDDs
5230 */
5231 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
5232 if (dev->offload_to_mirror)
5233 map_index += le16_to_cpu(map->data_disks_per_row);
5234 dev->offload_to_mirror = !dev->offload_to_mirror;
5235 break;
5236 case HPSA_RAID_ADM:
5237 /* Handles N-way mirrors (R1-ADM)
5238 		 * and R10 with # of drives divisible by 3.
5239 */
5240 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
5241
5242 offload_to_mirror = dev->offload_to_mirror;
5243 raid_map_helper(map, offload_to_mirror,
5244 				&map_index, &current_group);
5245 /* set mirror group to use next time */
5246 offload_to_mirror =
5247 (offload_to_mirror >=
5248 le16_to_cpu(map->layout_map_count) - 1)
5249 ? 0 : offload_to_mirror + 1;
5250 dev->offload_to_mirror = offload_to_mirror;
5251 /* Avoid direct use of dev->offload_to_mirror within this
5252 * function since multiple threads might simultaneously
5253 		 * increment it beyond the range of map->layout_map_count - 1.
5254 */
5255 break;
5256 case HPSA_RAID_5:
5257 case HPSA_RAID_6:
5258 if (le16_to_cpu(map->layout_map_count) <= 1)
5259 break;
5260
5261 /* Verify first and last block are in same RAID group */
5262 r5or6_blocks_per_row =
5263 le16_to_cpu(map->strip_size) *
5264 le16_to_cpu(map->data_disks_per_row);
5265 BUG_ON(r5or6_blocks_per_row == 0);
5266 stripesize = r5or6_blocks_per_row *
5267 le16_to_cpu(map->layout_map_count);
5268#if BITS_PER_LONG == 32
5269 tmpdiv = first_block;
5270 first_group = do_div(tmpdiv, stripesize);
5271 tmpdiv = first_group;
5272 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5273 first_group = tmpdiv;
5274 tmpdiv = last_block;
5275 last_group = do_div(tmpdiv, stripesize);
5276 tmpdiv = last_group;
5277 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5278 last_group = tmpdiv;
5279#else
5280 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
5281 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
5282#endif
5283 if (first_group != last_group)
5284 return IO_ACCEL_INELIGIBLE;
5285
5286 /* Verify request is in a single row of RAID 5/6 */
5287#if BITS_PER_LONG == 32
5288 tmpdiv = first_block;
5289 (void) do_div(tmpdiv, stripesize);
5290 first_row = r5or6_first_row = r0_first_row = tmpdiv;
5291 tmpdiv = last_block;
5292 (void) do_div(tmpdiv, stripesize);
5293 r5or6_last_row = r0_last_row = tmpdiv;
5294#else
5295 first_row = r5or6_first_row = r0_first_row =
5296 first_block / stripesize;
5297 r5or6_last_row = r0_last_row = last_block / stripesize;
5298#endif
5299 if (r5or6_first_row != r5or6_last_row)
5300 return IO_ACCEL_INELIGIBLE;
5301
5302
5303 /* Verify request is in a single column */
5304#if BITS_PER_LONG == 32
5305 tmpdiv = first_block;
5306 first_row_offset = do_div(tmpdiv, stripesize);
5307 tmpdiv = first_row_offset;
5308 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
5309 r5or6_first_row_offset = first_row_offset;
5310 tmpdiv = last_block;
5311 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
5312 tmpdiv = r5or6_last_row_offset;
5313 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
5314 tmpdiv = r5or6_first_row_offset;
5315 		(void) do_div(tmpdiv, strip_size);
5316 first_column = r5or6_first_column = tmpdiv;
5317 tmpdiv = r5or6_last_row_offset;
5318 		(void) do_div(tmpdiv, strip_size);
5319 r5or6_last_column = tmpdiv;
5320#else
5321 first_row_offset = r5or6_first_row_offset =
5322 (u32)((first_block % stripesize) %
5323 r5or6_blocks_per_row);
5324
5325 r5or6_last_row_offset =
5326 (u32)((last_block % stripesize) %
5327 r5or6_blocks_per_row);
5328
5329 first_column = r5or6_first_column =
5330 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
5331 r5or6_last_column =
5332 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
5333#endif
5334 if (r5or6_first_column != r5or6_last_column)
5335 return IO_ACCEL_INELIGIBLE;
5336
5337 /* Request is eligible */
5338 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5339 le16_to_cpu(map->row_cnt);
5340
5341 map_index = (first_group *
5342 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
5343 (map_row * total_disks_per_row) + first_column;
5344 break;
5345 default:
5346 return IO_ACCEL_INELIGIBLE;
5347 }
5348
5349 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
5350 return IO_ACCEL_INELIGIBLE;
5351
5352 c->phys_disk = dev->phys_disk[map_index];
5353 if (!c->phys_disk)
5354 return IO_ACCEL_INELIGIBLE;
5355
5356 disk_handle = dd[map_index].ioaccel_handle;
5357 disk_block = le64_to_cpu(map->disk_starting_blk) +
5358 first_row * le16_to_cpu(map->strip_size) +
5359 (first_row_offset - first_column *
5360 le16_to_cpu(map->strip_size));
5361 disk_block_cnt = block_cnt;
5362
5363 /* handle differing logical/physical block sizes */
5364 if (map->phys_blk_shift) {
5365 disk_block <<= map->phys_blk_shift;
5366 disk_block_cnt <<= map->phys_blk_shift;
5367 }
5368 BUG_ON(disk_block_cnt > 0xffff);
5369
5370 /* build the new CDB for the physical disk I/O */
5371 if (disk_block > 0xffffffff) {
5372 cdb[0] = is_write ? WRITE_16 : READ_16;
5373 cdb[1] = 0;
5374 cdb[2] = (u8) (disk_block >> 56);
5375 cdb[3] = (u8) (disk_block >> 48);
5376 cdb[4] = (u8) (disk_block >> 40);
5377 cdb[5] = (u8) (disk_block >> 32);
5378 cdb[6] = (u8) (disk_block >> 24);
5379 cdb[7] = (u8) (disk_block >> 16);
5380 cdb[8] = (u8) (disk_block >> 8);
5381 cdb[9] = (u8) (disk_block);
5382 cdb[10] = (u8) (disk_block_cnt >> 24);
5383 cdb[11] = (u8) (disk_block_cnt >> 16);
5384 cdb[12] = (u8) (disk_block_cnt >> 8);
5385 cdb[13] = (u8) (disk_block_cnt);
5386 cdb[14] = 0;
5387 cdb[15] = 0;
5388 cdb_len = 16;
5389 } else {
5390 cdb[0] = is_write ? WRITE_10 : READ_10;
5391 cdb[1] = 0;
5392 cdb[2] = (u8) (disk_block >> 24);
5393 cdb[3] = (u8) (disk_block >> 16);
5394 cdb[4] = (u8) (disk_block >> 8);
5395 cdb[5] = (u8) (disk_block);
5396 cdb[6] = 0;
5397 cdb[7] = (u8) (disk_block_cnt >> 8);
5398 cdb[8] = (u8) (disk_block_cnt);
5399 cdb[9] = 0;
5400 cdb_len = 10;
5401 }
5402 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
5403 dev->scsi3addr,
5404 dev->phys_disk[map_index]);
5405}
5406
5407/*
5408 * Submit commands down the "normal" RAID stack path
5409  * All callers of hpsa_ciss_submit must check lockup_detected
5410  * beforehand: optionally before, and again after, calling cmd_alloc.
5411 */
5412static int hpsa_ciss_submit(struct ctlr_info *h,
5413 struct CommandList *c, struct scsi_cmnd *cmd,
5414 struct hpsa_scsi_dev_t *dev)
5415{
5416 cmd->host_scribble = (unsigned char *) c;
5417 c->cmd_type = CMD_SCSI;
5418 c->scsi_cmd = cmd;
5419 c->Header.ReplyQueue = 0; /* unused in simple mode */
5420 memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8);
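/*
 * The command's pool index is encoded in the tag so that the
 * completion path can map a raw tag straight back to this command
 * (see process_indexed_cmd()).
 */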
5421 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
5422
5423 /* Fill in the request block... */
5424
5425 c->Request.Timeout = 0;
5426 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
5427 c->Request.CDBLen = cmd->cmd_len;
5428 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
5429 switch (cmd->sc_data_direction) {
5430 case DMA_TO_DEVICE:
5431 c->Request.type_attr_dir =
5432 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
5433 break;
5434 case DMA_FROM_DEVICE:
5435 c->Request.type_attr_dir =
5436 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
5437 break;
5438 case DMA_NONE:
5439 c->Request.type_attr_dir =
5440 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
5441 break;
5442 case DMA_BIDIRECTIONAL:
5443 /* This can happen if a buggy application does a scsi passthru
5444 * and sets both inlen and outlen to non-zero. (see
5445 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command())
5446 */
5447
5448 c->Request.type_attr_dir =
5449 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
5450 /* This is technically wrong, and hpsa controllers should
5451 * reject it with CMD_INVALID, which is the most correct
5452 * response, but non-fibre backends appear to let it
5453 * slide by, and give the same results as if this field
5454 * were set correctly. Either way is acceptable for
5455 * our purposes here.
5456 */
5457
5458 break;
5459
5460 default:
5461 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5462 cmd->sc_data_direction);
5463 BUG();
5464 break;
5465 }
5466
5467 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
5468 hpsa_cmd_resolve_and_free(h, c);
5469 return SCSI_MLQUEUE_HOST_BUSY;
5470 }
5471
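/* Don't start new I/O against a device that has entered reset. */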
5472 if (dev->in_reset) {
5473 hpsa_cmd_resolve_and_free(h, c);
5474 return SCSI_MLQUEUE_HOST_BUSY;
5475 }
5476
5477 enqueue_cmd_and_start_io(h, c);
5478 /* the cmd'll come back via intr handler in complete_scsi_command() */
5479 return 0;
5480}
5481
5482static void hpsa_cmd_init(struct ctlr_info *h, int index,
5483 struct CommandList *c)
5484{
5485 dma_addr_t cmd_dma_handle, err_dma_handle;
5486
5487 /* Zero out all of the CommandList except the last field, refcount */
5488 memset(c, 0, offsetof(struct CommandList, refcount));
5489 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
5490 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5491 c->err_info = h->errinfo_pool + index;
5492 memset(c->err_info, 0, sizeof(*c->err_info));
5493 err_dma_handle = h->errinfo_pool_dhandle
5494 + index * sizeof(*c->err_info);
5495 c->cmdindex = index;
5496 c->busaddr = (u32) cmd_dma_handle;
5497 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5498 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5499 c->h = h;
5500 c->scsi_cmd = SCSI_CMD_IDLE;
5501}
5502
5503static void hpsa_preinitialize_commands(struct ctlr_info *h)
5504{
5505 int i;
5506
5507 for (i = 0; i < h->nr_cmds; i++) {
5508 struct CommandList *c = h->cmd_pool + i;
5509
5510 hpsa_cmd_init(h, i, c);
5511 atomic_set(&c->refcount, 0);
5512 }
5513}
5514
5515static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
5516 struct CommandList *c)
5517{
5518 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5519
5520 BUG_ON(c->cmdindex != index);
5521
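/*
 * Reset only the per-I/O fields; the pool linkage (cmdindex, err_info
 * pointer, ErrDesc) established by hpsa_cmd_init() stays intact.
 */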
5522 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
5523 memset(c->err_info, 0, sizeof(*c->err_info));
5524 c->busaddr = (u32) cmd_dma_handle;
5525}
5526
5527static int hpsa_ioaccel_submit(struct ctlr_info *h,
5528 struct CommandList *c, struct scsi_cmnd *cmd)
5529{
5530 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5531 int rc = IO_ACCEL_INELIGIBLE;
5532
5533 if (!dev)
5534 return SCSI_MLQUEUE_HOST_BUSY;
5535
5536 if (dev->in_reset)
5537 return SCSI_MLQUEUE_HOST_BUSY;
5538
5539 if (hpsa_simple_mode)
5540 return IO_ACCEL_INELIGIBLE;
5541
5542 cmd->host_scribble = (unsigned char *) c;
5543
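/*
 * Two accelerated paths: RAID-mapped ioaccel for offload-enabled
 * logical volumes, direct mapping for HBA-exposed physical disks.
 */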
5544 if (dev->offload_enabled) {
5545 hpsa_cmd_init(h, c->cmdindex, c);
5546 c->cmd_type = CMD_SCSI;
5547 c->scsi_cmd = cmd;
5548 rc = hpsa_scsi_ioaccel_raid_map(h, c);
5549 if (rc < 0) /* scsi_dma_map failed. */
5550 rc = SCSI_MLQUEUE_HOST_BUSY;
5551 } else if (dev->hba_ioaccel_enabled) {
5552 hpsa_cmd_init(h, c->cmdindex, c);
5553 c->cmd_type = CMD_SCSI;
5554 c->scsi_cmd = cmd;
5555 rc = hpsa_scsi_ioaccel_direct_map(h, c);
5556 if (rc < 0) /* scsi_dma_map failed. */
5557 rc = SCSI_MLQUEUE_HOST_BUSY;
5558 }
5559 return rc;
5560}
5561
5562static void hpsa_command_resubmit_worker(struct work_struct *work)
5563{
5564 struct scsi_cmnd *cmd;
5565 struct hpsa_scsi_dev_t *dev;
5566 struct CommandList *c = container_of(work, struct CommandList, work);
5567
5568 cmd = c->scsi_cmd;
5569 dev = cmd->device->hostdata;
5570 if (!dev) {
5571 cmd->result = DID_NO_CONNECT << 16;
5572 return hpsa_cmd_free_and_done(c->h, c, cmd);
5573 }
5574
5575 if (dev->in_reset) {
5576 cmd->result = DID_RESET << 16;
5577 return hpsa_cmd_free_and_done(c->h, c, cmd);
5578 }
5579
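/*
 * An ioaccel command that completed with "task set full" is retried
 * down the ioaccel path; any other completion status falls through to
 * the normal RAID stack below.
 */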
5580 if (c->cmd_type == CMD_IOACCEL2) {
5581 struct ctlr_info *h = c->h;
5582 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5583 int rc;
5584
5585 if (c2->error_data.serv_response ==
5586 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
5587 rc = hpsa_ioaccel_submit(h, c, cmd);
5588 if (rc == 0)
5589 return;
5590 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5591 /*
5592 * If we get here, it means dma mapping failed.
5593 * Try again via scsi mid layer, which will
5594 * then get SCSI_MLQUEUE_HOST_BUSY.
5595 */
5596 cmd->result = DID_IMM_RETRY << 16;
5597 return hpsa_cmd_free_and_done(h, c, cmd);
5598 }
5599 /* else, fall thru and resubmit down CISS path */
5600 }
5601 }
5602 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
5603 if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
5604 /*
5605 * If we get here, it means dma mapping failed. Try
5606 * again via scsi mid layer, which will then get
5607 * SCSI_MLQUEUE_HOST_BUSY.
5608 *
5609 * hpsa_ciss_submit will have already freed c
5610 * if it encountered a dma mapping failure.
5611 */
5612 cmd->result = DID_IMM_RETRY << 16;
5613 cmd->scsi_done(cmd);
5614 }
5615}
5616
5617/* Runs without holding struct Scsi_Host->host_lock (host_lock-less mode) */
5618static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5619{
5620 struct ctlr_info *h;
5621 struct hpsa_scsi_dev_t *dev;
5622 struct CommandList *c;
5623 int rc = 0;
5624
5625 /* Get the ptr to our adapter structure out of cmd->host. */
5626 h = sdev_to_hba(cmd->device);
5627
5628 BUG_ON(cmd->request->tag < 0);
5629
5630 dev = cmd->device->hostdata;
5631 if (!dev) {
5632 cmd->result = DID_NO_CONNECT << 16;
5633 cmd->scsi_done(cmd);
5634 return 0;
5635 }
5636
5637 if (dev->removed) {
5638 cmd->result = DID_NO_CONNECT << 16;
5639 cmd->scsi_done(cmd);
5640 return 0;
5641 }
5642
5643 if (unlikely(lockup_detected(h))) {
5644 cmd->result = DID_NO_CONNECT << 16;
5645 cmd->scsi_done(cmd);
5646 return 0;
5647 }
5648
5649 if (dev->in_reset)
5650 return SCSI_MLQUEUE_DEVICE_BUSY;
5651
5652 c = cmd_tagged_alloc(h, cmd);
5653 if (c == NULL)
5654 return SCSI_MLQUEUE_DEVICE_BUSY;
5655
5656 /*
5657 * Call alternate submit routine for I/O accelerated commands.
5658 * Retries always go down the normal I/O path.
5659 */
5660 if (likely(cmd->retries == 0 &&
5661 !blk_rq_is_passthrough(cmd->request) &&
5662 h->acciopath_status)) {
5663 rc = hpsa_ioaccel_submit(h, c, cmd);
5664 if (rc == 0)
5665 return 0;
5666 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5667 hpsa_cmd_resolve_and_free(h, c);
5668 return SCSI_MLQUEUE_HOST_BUSY;
5669 }
5670 }
5671 return hpsa_ciss_submit(h, c, cmd, dev);
5672}
5673
5674static void hpsa_scan_complete(struct ctlr_info *h)
5675{
5676 unsigned long flags;
5677
5678 spin_lock_irqsave(&h->scan_lock, flags);
5679 h->scan_finished = 1;
5680 wake_up(&h->scan_wait_queue);
5681 spin_unlock_irqrestore(&h->scan_lock, flags);
5682}
5683
5684static void hpsa_scan_start(struct Scsi_Host *sh)
5685{
5686 struct ctlr_info *h = shost_to_hba(sh);
5687 unsigned long flags;
5688
5689 /*
5690 * Don't let rescans be initiated on a controller known to be locked
5691 * up. If the controller locks up *during* a rescan, that thread is
5692 * probably hosed, but at least we can prevent new rescan threads from
5693 * piling up on a locked up controller.
5694 */
5695 if (unlikely(lockup_detected(h)))
5696 return hpsa_scan_complete(h);
5697
5698 /*
5699 * If a scan is already waiting to run, no need to add another
5700 */
5701 spin_lock_irqsave(&h->scan_lock, flags);
5702 if (h->scan_waiting) {
5703 spin_unlock_irqrestore(&h->scan_lock, flags);
5704 return;
5705 }
5706
5707 spin_unlock_irqrestore(&h->scan_lock, flags);
5708
5709 /* wait until any scan already in progress is finished. */
5710 while (1) {
5711 spin_lock_irqsave(&h->scan_lock, flags);
5712 if (h->scan_finished)
5713 break;
5714 h->scan_waiting = 1;
5715 spin_unlock_irqrestore(&h->scan_lock, flags);
5716 wait_event(h->scan_wait_queue, h->scan_finished);
5717 /* Note: We don't need to worry about a race between this
5718 * thread and driver unload because the midlayer will
5719 * have incremented the reference count, so unload won't
5720 * happen if we're in here.
5721 */
5722 }
5723 h->scan_finished = 0; /* mark scan as in progress */
5724 h->scan_waiting = 0;
5725 spin_unlock_irqrestore(&h->scan_lock, flags);
5726
5727 if (unlikely(lockup_detected(h)))
5728 return hpsa_scan_complete(h);
5729
5730 /*
5731 * Do the scan after a reset completion
5732 */
5733 spin_lock_irqsave(&h->reset_lock, flags);
5734 if (h->reset_in_progress) {
5735 h->drv_req_rescan = 1;
5736 spin_unlock_irqrestore(&h->reset_lock, flags);
5737 hpsa_scan_complete(h);
5738 return;
5739 }
5740 spin_unlock_irqrestore(&h->reset_lock, flags);
5741
5742 hpsa_update_scsi_devices(h);
5743
5744 hpsa_scan_complete(h);
5745}
5746
5747static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5748{
5749 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5750
5751 if (!logical_drive)
5752 return -ENODEV;
5753
5754 if (qdepth < 1)
5755 qdepth = 1;
5756 else if (qdepth > logical_drive->queue_depth)
5757 qdepth = logical_drive->queue_depth;
5758
5759 return scsi_change_queue_depth(sdev, qdepth);
5760}
5761
5762static int hpsa_scan_finished(struct Scsi_Host *sh,
5763 unsigned long elapsed_time)
5764{
5765 struct ctlr_info *h = shost_to_hba(sh);
5766 unsigned long flags;
5767 int finished;
5768
5769 spin_lock_irqsave(&h->scan_lock, flags);
5770 finished = h->scan_finished;
5771 spin_unlock_irqrestore(&h->scan_lock, flags);
5772 return finished;
5773}
5774
5775static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5776{
5777 struct Scsi_Host *sh;
5778
5779 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5780 if (sh == NULL) {
5781 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5782 return -ENOMEM;
5783 }
5784
5785 sh->io_port = 0;
5786 sh->n_io_port = 0;
5787 sh->this_id = -1;
5788 sh->max_channel = 3;
5789 sh->max_cmd_len = MAX_COMMAND_SIZE;
5790 sh->max_lun = HPSA_MAX_LUN;
5791 sh->max_id = HPSA_MAX_LUN;
5792 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
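/* The HPSA_NRESERVED_CMDS slots held back are for driver-internal commands. */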
5793 sh->cmd_per_lun = sh->can_queue;
5794 sh->sg_tablesize = h->maxsgentries;
5795 sh->transportt = hpsa_sas_transport_template;
5796 sh->hostdata[0] = (unsigned long) h;
5797 sh->irq = pci_irq_vector(h->pdev, 0);
5798 sh->unique_id = sh->irq;
5799
5800 h->scsi_host = sh;
5801 return 0;
5802}
5803
5804static int hpsa_scsi_add_host(struct ctlr_info *h)
5805{
5806 int rv;
5807
5808 rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5809 if (rv) {
5810 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5811 return rv;
5812 }
5813 scsi_scan_host(h->scsi_host);
5814 return 0;
5815}
5816
5817/*
5818 * The block layer has already gone to the trouble of picking out a unique,
5819 * small-integer tag for this request. We use an offset from that value as
5820 * an index to select our command block. (The offset allows us to reserve the
5821 * low-numbered entries for our own uses.)
5822 */
5823static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5824{
5825 int idx = scmd->request->tag;
5826
5827 if (idx < 0)
5828 return idx;
5829
5830 /* Offset to leave space for internal cmds. */
5831 return idx + HPSA_NRESERVED_CMDS;
5832}
5833
5834/*
5835 * Send a TEST_UNIT_READY command to the specified LUN using the specified
5836 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
5837 */
5838static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5839 struct CommandList *c, unsigned char lunaddr[],
5840 int reply_queue)
5841{
5842 int rc;
5843
5844 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
5845 (void) fill_cmd(c, TEST_UNIT_READY, h,
5846 NULL, 0, 0, lunaddr, TYPE_CMD);
5847 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5848 if (rc)
5849 return rc;
5850 /* no unmap needed here because no data xfer. */
5851
5852 /* Check if the unit is already ready. */
5853 if (c->err_info->CommandStatus == CMD_SUCCESS)
5854 return 0;
5855
5856 /*
5857 * The first command sent after reset will receive "unit attention" to
5858 * indicate that the LUN has been reset...this is actually what we're
5859 * looking for (but, success is good too).
5860 */
5861 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5862 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5863 (c->err_info->SenseInfo[2] == NO_SENSE ||
5864 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5865 return 0;
5866
5867 return 1;
5868}
5869
5870/*
5871 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
5872 * returns zero when the unit is ready, and non-zero when giving up.
5873 */
5874static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5875 struct CommandList *c,
5876 unsigned char lunaddr[], int reply_queue)
5877{
5878 int rc;
5879 int count = 0;
5880 int waittime = 1; /* seconds */
5881
5882 /* Send test unit ready until device ready, or give up. */
5883 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5884
5885 /*
5886 * Wait for a bit. do this first, because if we send
5887 * the TUR right away, the reset will just abort it.
5888 */
5889 msleep(1000 * waittime);
5890
5891 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5892 if (!rc)
5893 break;
5894
5895 /* Increase wait time with each try, up to a point. */
5896 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5897 waittime *= 2;
5898
5899 dev_warn(&h->pdev->dev,
5900 "waiting %d secs for device to become ready.\n",
5901 waittime);
5902 }
5903
5904 return rc;
5905}
5906
5907static int wait_for_device_to_become_ready(struct ctlr_info *h,
5908 unsigned char lunaddr[],
5909 int reply_queue)
5910{
5911 int first_queue;
5912 int last_queue;
5913 int rq;
5914 int rc = 0;
5915 struct CommandList *c;
5916
5917 c = cmd_alloc(h);
5918
5919 /*
5920 * If no specific reply queue was requested, then send the TUR
5921 * repeatedly, requesting a reply on each reply queue; otherwise execute
5922 * the loop exactly once using only the specified queue.
5923 */
5924 if (reply_queue == DEFAULT_REPLY_QUEUE) {
5925 first_queue = 0;
5926 last_queue = h->nreply_queues - 1;
5927 } else {
5928 first_queue = reply_queue;
5929 last_queue = reply_queue;
5930 }
5931
5932 for (rq = first_queue; rq <= last_queue; rq++) {
5933 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5934 if (rc)
5935 break;
5936 }
5937
5938 if (rc)
5939 dev_warn(&h->pdev->dev, "giving up on device.\n");
5940 else
5941 dev_warn(&h->pdev->dev, "device is ready.\n");
5942
5943 cmd_free(h, c);
5944 return rc;
5945}
5946
5947/* Need at least one of these error handlers to keep ../scsi/hosts.c from
5948 * complaining. Doing a host- or bus-reset can't do anything good here.
5949 */
5950static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5951{
5952 int rc = SUCCESS;
5953 int i;
5954 struct ctlr_info *h;
5955 struct hpsa_scsi_dev_t *dev = NULL;
5956 u8 reset_type;
5957 char msg[48];
5958 unsigned long flags;
5959
5960 /* find the controller to which the command to be aborted was sent */
5961 h = sdev_to_hba(scsicmd->device);
5962 if (h == NULL) /* paranoia */
5963 return FAILED;
5964
5965 spin_lock_irqsave(&h->reset_lock, flags);
5966 h->reset_in_progress = 1;
5967 spin_unlock_irqrestore(&h->reset_lock, flags);
5968
5969 if (lockup_detected(h)) {
5970 rc = FAILED;
5971 goto return_reset_status;
5972 }
5973
5974 dev = scsicmd->device->hostdata;
5975 if (!dev) {
5976 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
5977 rc = FAILED;
5978 goto return_reset_status;
5979 }
5980
5981 if (dev->devtype == TYPE_ENCLOSURE) {
5982 rc = SUCCESS;
5983 goto return_reset_status;
5984 }
5985
5986 /* if controller locked up, we can guarantee command won't complete */
5987 if (lockup_detected(h)) {
5988 snprintf(msg, sizeof(msg),
5989 "cmd %d RESET FAILED, lockup detected",
5990 hpsa_get_cmd_index(scsicmd));
5991 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5992 rc = FAILED;
5993 goto return_reset_status;
5994 }
5995
5996 /* this reset request might be the result of a lockup; check */
5997 if (detect_controller_lockup(h)) {
5998 snprintf(msg, sizeof(msg),
5999 "cmd %d RESET FAILED, new lockup detected",
6000 hpsa_get_cmd_index(scsicmd));
6001 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6002 rc = FAILED;
6003 goto return_reset_status;
6004 }
6005
6006 /* Do not attempt a reset on the controller device itself */
6007 if (is_hba_lunid(dev->scsi3addr)) {
6008 rc = SUCCESS;
6009 goto return_reset_status;
6010 }
6011
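/* Logical volumes take a LUN reset; bare physical devices take a target reset. */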
6012 if (is_logical_dev_addr_mode(dev->scsi3addr))
6013 reset_type = HPSA_DEVICE_RESET_MSG;
6014 else
6015 reset_type = HPSA_PHYS_TARGET_RESET;
6016
6017 sprintf(msg, "resetting %s",
6018 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
6019 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6020
6021 /*
6022 * wait to see if any commands will complete before sending reset
6023 */
6024 dev->in_reset = true; /* block any new cmds from OS for this device */
6025 for (i = 0; i < 10; i++) {
6026 if (atomic_read(&dev->commands_outstanding) > 0)
6027 msleep(1000);
6028 else
6029 break;
6030 }
6031
6032 /* send a reset to the SCSI LUN which the command was sent to */
6033 rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE);
6034 if (rc == 0)
6035 rc = SUCCESS;
6036 else
6037 rc = FAILED;
6038
6039 sprintf(msg, "reset %s %s",
6040 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
6041 rc == SUCCESS ? "completed successfully" : "failed");
6042 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6043
6044return_reset_status:
6045 spin_lock_irqsave(&h->reset_lock, flags);
6046 h->reset_in_progress = 0;
6047 if (dev)
6048 dev->in_reset = false;
6049 spin_unlock_irqrestore(&h->reset_lock, flags);
6050 return rc;
6051}
6052
6053/*
6054 * For operations with an associated SCSI command, a command block is allocated
6055 * at init and handed out by cmd_tagged_alloc(), using the block request tag as
6056 * an index into a table of entries. cmd_tagged_free() is the complement,
6057 * although cmd_free() may be called instead.
6058 */
6059static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
6060 struct scsi_cmnd *scmd)
6061{
6062 int idx = hpsa_get_cmd_index(scmd);
6063 struct CommandList *c = h->cmd_pool + idx;
6064
6065 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
6066 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
6067 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
6068 /* The index value comes from the block layer, so if it's out of
6069 * bounds, it's probably not our bug.
6070 */
6071 BUG();
6072 }
6073
6074 if (unlikely(!hpsa_is_cmd_idle(c))) {
6075 /*
6076 * We expect that the SCSI layer will hand us a unique tag
6077 * value. Thus, there should never be a collision here between
6078 * two requests...because if the selected command isn't idle
6079 * then someone is going to be very disappointed.
6080 */
6081 if (idx != h->last_collision_tag) { /* Print once per tag */
6082 dev_warn(&h->pdev->dev,
6083 "%s: tag collision (tag=%d)\n", __func__, idx);
6084 if (c->scsi_cmd != NULL)
6085 scsi_print_command(c->scsi_cmd);
6086 if (scmd)
6087 scsi_print_command(scmd);
6088 h->last_collision_tag = idx;
6089 }
6090 return NULL;
6091 }
6092
6093 atomic_inc(&c->refcount);
6094
6095 hpsa_cmd_partial_init(h, idx, c);
6096 return c;
6097}
6098
6099static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
6100{
6101 /*
6102 * Release our reference to the block. We don't need to do anything
6103 * else to free it, because it is accessed by index.
6104 */
6105 (void)atomic_dec(&c->refcount);
6106}
6107
6108/*
6109 * For operations that cannot sleep, a command block is allocated at init,
6110 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
6111 * which ones are free or in use. Lock must be held when calling this.
6112 * cmd_free() is the complement.
6113 * This function never gives up, and it never returns NULL. If it
6114 * hangs, another thread must call cmd_free() to free some tags.
6115 */
6116
6117static struct CommandList *cmd_alloc(struct ctlr_info *h)
6118{
6119 struct CommandList *c;
6120 int refcount, i;
6121 int offset = 0;
6122
6123 /*
6124 * There is some *extremely* small but non-zero chance that
6125 * multiple threads could get in here, and one thread could
6126 * be scanning through the list of bits looking for a free
6127 * one, but the free ones are always behind him, and other
6128 * threads sneak in behind him and eat them before he can
6129 * get to them, so that while there is always a free one, a
6130 * very unlucky thread might be starved anyway, never able to
6131 * beat the other threads. In reality, this happens so
6132 * infrequently as to be indistinguishable from never.
6133 *
6134 * Note that we start allocating commands before the SCSI host structure
6135 * is initialized. Since the search starts at bit zero, this
6136 * all works, since we have at least one command structure available;
6137 * however, it means that the structures with the low indexes have to be
6138 * reserved for driver-initiated requests, while requests from the block
6139 * layer will use the higher indexes.
6140 */
6141
6142 for (;;) {
6143 i = find_next_zero_bit(h->cmd_pool_bits,
6144 HPSA_NRESERVED_CMDS,
6145 offset);
6146 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
6147 offset = 0;
6148 continue;
6149 }
6150 c = h->cmd_pool + i;
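/*
 * The refcount, not the bitmap bit, is the authoritative claim;
 * if another thread won the race, the count is already above 1.
 */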
6151 refcount = atomic_inc_return(&c->refcount);
6152 if (unlikely(refcount > 1)) {
6153 cmd_free(h, c); /* already in use */
6154 offset = (i + 1) % HPSA_NRESERVED_CMDS;
6155 continue;
6156 }
6157 set_bit(i & (BITS_PER_LONG - 1),
6158 h->cmd_pool_bits + (i / BITS_PER_LONG));
6159 break; /* it's ours now. */
6160 }
6161 hpsa_cmd_partial_init(h, i, c);
6162 c->device = NULL;
6163 return c;
6164}
6165
6166/*
6167 * This is the complementary operation to cmd_alloc(). Note, however, that in
6168 * some corner cases it may also be used to free blocks allocated by
6169 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
6170 * the clear-bit is harmless.
6171 */
6172static void cmd_free(struct ctlr_info *h, struct CommandList *c)
6173{
6174 if (atomic_dec_and_test(&c->refcount)) {
6175 int i;
6176
6177 i = c - h->cmd_pool;
6178 clear_bit(i & (BITS_PER_LONG - 1),
6179 h->cmd_pool_bits + (i / BITS_PER_LONG));
6180 }
6181}
6182
6183#ifdef CONFIG_COMPAT
6184
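/*
 * Compat shims: repack the 32-bit userspace ioctl structs into native
 * ones in a compat_alloc_user_space() area, then reuse hpsa_ioctl().
 */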
6185static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd,
6186 void __user *arg)
6187{
6188 IOCTL32_Command_struct __user *arg32 =
6189 (IOCTL32_Command_struct __user *) arg;
6190 IOCTL_Command_struct arg64;
6191 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
6192 int err;
6193 u32 cp;
6194
6195 memset(&arg64, 0, sizeof(arg64));
6196 err = 0;
6197 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6198 sizeof(arg64.LUN_info));
6199 err |= copy_from_user(&arg64.Request, &arg32->Request,
6200 sizeof(arg64.Request));
6201 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6202 sizeof(arg64.error_info));
6203 err |= get_user(arg64.buf_size, &arg32->buf_size);
6204 err |= get_user(cp, &arg32->buf);
6205 arg64.buf = compat_ptr(cp);
6206 err |= copy_to_user(p, &arg64, sizeof(arg64));
6207
6208 if (err)
6209 return -EFAULT;
6210
6211 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
6212 if (err)
6213 return err;
6214 err |= copy_in_user(&arg32->error_info, &p->error_info,
6215 sizeof(arg32->error_info));
6216 if (err)
6217 return -EFAULT;
6218 return err;
6219}
6220
6221static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
6222 unsigned int cmd, void __user *arg)
6223{
6224 BIG_IOCTL32_Command_struct __user *arg32 =
6225 (BIG_IOCTL32_Command_struct __user *) arg;
6226 BIG_IOCTL_Command_struct arg64;
6227 BIG_IOCTL_Command_struct __user *p =
6228 compat_alloc_user_space(sizeof(arg64));
6229 int err;
6230 u32 cp;
6231
6232 memset(&arg64, 0, sizeof(arg64));
6233 err = 0;
6234 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6235 sizeof(arg64.LUN_info));
6236 err |= copy_from_user(&arg64.Request, &arg32->Request,
6237 sizeof(arg64.Request));
6238 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6239 sizeof(arg64.error_info));
6240 err |= get_user(arg64.buf_size, &arg32->buf_size);
6241 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
6242 err |= get_user(cp, &arg32->buf);
6243 arg64.buf = compat_ptr(cp);
6244 err |= copy_to_user(p, &arg64, sizeof(arg64));
6245
6246 if (err)
6247 return -EFAULT;
6248
6249 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
6250 if (err)
6251 return err;
6252 err |= copy_in_user(&arg32->error_info, &p->error_info,
6253 sizeof(arg32->error_info));
6254 if (err)
6255 return -EFAULT;
6256 return err;
6257}
6258
6259static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
6260 void __user *arg)
6261{
6262 switch (cmd) {
6263 case CCISS_GETPCIINFO:
6264 case CCISS_GETINTINFO:
6265 case CCISS_SETINTINFO:
6266 case CCISS_GETNODENAME:
6267 case CCISS_SETNODENAME:
6268 case CCISS_GETHEARTBEAT:
6269 case CCISS_GETBUSTYPES:
6270 case CCISS_GETFIRMVER:
6271 case CCISS_GETDRIVVER:
6272 case CCISS_REVALIDVOLS:
6273 case CCISS_DEREGDISK:
6274 case CCISS_REGNEWDISK:
6275 case CCISS_REGNEWD:
6276 case CCISS_RESCANDISK:
6277 case CCISS_GETLUNINFO:
6278 return hpsa_ioctl(dev, cmd, arg);
6279
6280 case CCISS_PASSTHRU32:
6281 return hpsa_ioctl32_passthru(dev, cmd, arg);
6282 case CCISS_BIG_PASSTHRU32:
6283 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
6284
6285 default:
6286 return -ENOIOCTLCMD;
6287 }
6288}
6289#endif
6290
6291static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
6292{
6293 struct hpsa_pci_info pciinfo;
6294
6295 if (!argp)
6296 return -EINVAL;
6297 pciinfo.domain = pci_domain_nr(h->pdev->bus);
6298 pciinfo.bus = h->pdev->bus->number;
6299 pciinfo.dev_fn = h->pdev->devfn;
6300 pciinfo.board_id = h->board_id;
6301 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
6302 return -EFAULT;
6303 return 0;
6304}
6305
6306static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
6307{
6308 DriverVer_type DriverVer;
6309 unsigned char vmaj, vmin, vsubmin;
6310 int rc;
6311
6312 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
6313 &vmaj, &vmin, &vsubmin);
6314 if (rc != 3) {
6315 dev_info(&h->pdev->dev, "driver version string '%s' unrecognized.",
6316 HPSA_DRIVER_VERSION);
6317 vmaj = 0;
6318 vmin = 0;
6319 vsubmin = 0;
6320 }
6321 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
6322 if (!argp)
6323 return -EINVAL;
6324 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
6325 return -EFAULT;
6326 return 0;
6327}
6328
6329static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6330{
6331 IOCTL_Command_struct iocommand;
6332 struct CommandList *c;
6333 char *buff = NULL;
6334 u64 temp64;
6335 int rc = 0;
6336
6337 if (!argp)
6338 return -EINVAL;
6339 if (!capable(CAP_SYS_RAWIO))
6340 return -EPERM;
6341 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6342 return -EFAULT;
6343 if ((iocommand.buf_size < 1) &&
6344 (iocommand.Request.Type.Direction != XFER_NONE)) {
6345 return -EINVAL;
6346 }
6347 if (iocommand.buf_size > 0) {
6348 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
6349 if (buff == NULL)
6350 return -ENOMEM;
6351 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6352 /* Copy the data into the buffer we created */
6353 if (copy_from_user(buff, iocommand.buf,
6354 iocommand.buf_size)) {
6355 rc = -EFAULT;
6356 goto out_kfree;
6357 }
6358 } else {
6359 memset(buff, 0, iocommand.buf_size);
6360 }
6361 }
6362 c = cmd_alloc(h);
6363
6364 /* Fill in the command type */
6365 c->cmd_type = CMD_IOCTL_PEND;
6366 c->scsi_cmd = SCSI_CMD_BUSY;
6367 /* Fill in Command Header */
6368 c->Header.ReplyQueue = 0; /* unused in simple mode */
6369 if (iocommand.buf_size > 0) { /* buffer to fill */
6370 c->Header.SGList = 1;
6371 c->Header.SGTotal = cpu_to_le16(1);
6372 } else { /* no buffers to fill */
6373 c->Header.SGList = 0;
6374 c->Header.SGTotal = cpu_to_le16(0);
6375 }
6376 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
6377
6378 /* Fill in Request block */
6379 memcpy(&c->Request, &iocommand.Request,
6380 sizeof(c->Request));
6381
6382 /* Fill in the scatter gather information */
6383 if (iocommand.buf_size > 0) {
6384 temp64 = dma_map_single(&h->pdev->dev, buff,
6385 iocommand.buf_size, DMA_BIDIRECTIONAL);
6386 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6387 c->SG[0].Addr = cpu_to_le64(0);
6388 c->SG[0].Len = cpu_to_le32(0);
6389 rc = -ENOMEM;
6390 goto out;
6391 }
6392 c->SG[0].Addr = cpu_to_le64(temp64);
6393 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
6394 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
6395 }
6396 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6397 NO_TIMEOUT);
6398 if (iocommand.buf_size > 0)
6399 hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
6400 check_ioctl_unit_attention(h, c);
6401 if (rc) {
6402 rc = -EIO;
6403 goto out;
6404 }
6405
6406 /* Copy the error information out */
6407 memcpy(&iocommand.error_info, c->err_info,
6408 sizeof(iocommand.error_info));
6409 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
6410 rc = -EFAULT;
6411 goto out;
6412 }
6413 if ((iocommand.Request.Type.Direction & XFER_READ) &&
6414 iocommand.buf_size > 0) {
6415 /* Copy the data out of the buffer we created */
6416 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
6417 rc = -EFAULT;
6418 goto out;
6419 }
6420 }
6421out:
6422 cmd_free(h, c);
6423out_kfree:
6424 kfree(buff);
6425 return rc;
6426}
6427
6428static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6429{
6430 BIG_IOCTL_Command_struct *ioc;
6431 struct CommandList *c;
6432 unsigned char **buff = NULL;
6433 int *buff_size = NULL;
6434 u64 temp64;
6435 BYTE sg_used = 0;
6436 int status = 0;
6437 u32 left;
6438 u32 sz;
6439 BYTE __user *data_ptr;
6440
6441 if (!argp)
6442 return -EINVAL;
6443 if (!capable(CAP_SYS_RAWIO))
6444 return -EPERM;
6445 ioc = vmemdup_user(argp, sizeof(*ioc));
6446 if (IS_ERR(ioc)) {
6447 status = PTR_ERR(ioc);
6448 goto cleanup1;
6449 }
6450 if ((ioc->buf_size < 1) &&
6451 (ioc->Request.Type.Direction != XFER_NONE)) {
6452 status = -EINVAL;
6453 goto cleanup1;
6454 }
6455 /* Check kmalloc limits using all SGs */
6456 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
6457 status = -EINVAL;
6458 goto cleanup1;
6459 }
6460 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
6461 status = -EINVAL;
6462 goto cleanup1;
6463 }
6464 buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL);
6465 if (!buff) {
6466 status = -ENOMEM;
6467 goto cleanup1;
6468 }
6469 buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL);
6470 if (!buff_size) {
6471 status = -ENOMEM;
6472 goto cleanup1;
6473 }
6474 left = ioc->buf_size;
6475 data_ptr = ioc->buf;
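/* Split the user buffer into chunks of at most malloc_size bytes, one per SG entry. */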
6476 while (left) {
6477 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6478 buff_size[sg_used] = sz;
6479 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6480 if (buff[sg_used] == NULL) {
6481 status = -ENOMEM;
6482 goto cleanup1;
6483 }
6484 if (ioc->Request.Type.Direction & XFER_WRITE) {
6485 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6486 status = -EFAULT;
6487 goto cleanup1;
6488 }
6489 } else
6490 memset(buff[sg_used], 0, sz);
6491 left -= sz;
6492 data_ptr += sz;
6493 sg_used++;
6494 }
6495 c = cmd_alloc(h);
6496
6497 c->cmd_type = CMD_IOCTL_PEND;
6498 c->scsi_cmd = SCSI_CMD_BUSY;
6499 c->Header.ReplyQueue = 0;
6500 c->Header.SGList = (u8) sg_used;
6501 c->Header.SGTotal = cpu_to_le16(sg_used);
6502 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6503 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6504 if (ioc->buf_size > 0) {
6505 int i;
6506 for (i = 0; i < sg_used; i++) {
6507 temp64 = dma_map_single(&h->pdev->dev, buff[i],
6508 buff_size[i], DMA_BIDIRECTIONAL);
6509 if (dma_mapping_error(&h->pdev->dev,
6510 (dma_addr_t) temp64)) {
6511 c->SG[i].Addr = cpu_to_le64(0);
6512 c->SG[i].Len = cpu_to_le32(0);
6513 hpsa_pci_unmap(h->pdev, c, i,
6514 DMA_BIDIRECTIONAL);
6515 status = -ENOMEM;
6516 goto cleanup0;
6517 }
6518 c->SG[i].Addr = cpu_to_le64(temp64);
6519 c->SG[i].Len = cpu_to_le32(buff_size[i]);
6520 c->SG[i].Ext = cpu_to_le32(0);
6521 }
6522 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6523 }
6524 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6525 NO_TIMEOUT);
6526 if (sg_used)
6527 hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
6528 check_ioctl_unit_attention(h, c);
6529 if (status) {
6530 status = -EIO;
6531 goto cleanup0;
6532 }
6533
6534 /* Copy the error information out */
6535 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6536 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
6537 status = -EFAULT;
6538 goto cleanup0;
6539 }
6540 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6541 int i;
6542
6543 /* Copy the data out of the buffer we created */
6544 BYTE __user *ptr = ioc->buf;
6545 for (i = 0; i < sg_used; i++) {
6546 if (copy_to_user(ptr, buff[i], buff_size[i])) {
6547 status = -EFAULT;
6548 goto cleanup0;
6549 }
6550 ptr += buff_size[i];
6551 }
6552 }
6553 status = 0;
6554cleanup0:
6555 cmd_free(h, c);
6556cleanup1:
6557 if (buff) {
6558 int i;
6559
6560 for (i = 0; i < sg_used; i++)
6561 kfree(buff[i]);
6562 kfree(buff);
6563 }
6564 kfree(buff_size);
6565 kvfree(ioc);
6566 return status;
6567}
6568
6569static void check_ioctl_unit_attention(struct ctlr_info *h,
6570 struct CommandList *c)
6571{
6572 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6573 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6574 (void) check_for_unit_attention(h, c);
6575}
6576
6577/*
6578 * ioctl
6579 */
6580static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
6581 void __user *arg)
6582{
6583 struct ctlr_info *h;
6584 void __user *argp = (void __user *)arg;
6585 int rc;
6586
6587 h = sdev_to_hba(dev);
6588
6589 switch (cmd) {
6590 case CCISS_DEREGDISK:
6591 case CCISS_REGNEWDISK:
6592 case CCISS_REGNEWD:
6593 hpsa_scan_start(h->scsi_host);
6594 return 0;
6595 case CCISS_GETPCIINFO:
6596 return hpsa_getpciinfo_ioctl(h, argp);
6597 case CCISS_GETDRIVVER:
6598 return hpsa_getdrivver_ioctl(h, argp);
6599 case CCISS_PASSTHRU:
6600 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6601 return -EAGAIN;
6602 rc = hpsa_passthru_ioctl(h, argp);
6603 atomic_inc(&h->passthru_cmds_avail);
6604 return rc;
6605 case CCISS_BIG_PASSTHRU:
6606 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6607 return -EAGAIN;
6608 rc = hpsa_big_passthru_ioctl(h, argp);
6609 atomic_inc(&h->passthru_cmds_avail);
6610 return rc;
6611 default:
6612 return -ENOTTY;
6613 }
6614}
6615
6616static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type)
6617{
6618 struct CommandList *c;
6619
6620 c = cmd_alloc(h);
6621
6622 /* fill_cmd can't fail here, no data buffer to map */
6623 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6624 RAID_CTLR_LUNID, TYPE_MSG);
6625 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6626 c->waiting = NULL;
6627 enqueue_cmd_and_start_io(h, c);
6628 /* Don't wait for completion, the reset won't complete. Don't free
6629 * the command either. This is the last command we will send before
6630 * re-initializing everything, so it doesn't matter and won't leak.
6631 */
6632 return;
6633}
6634
6635static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6636 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6637 int cmd_type)
6638{
6639 enum dma_data_direction dir = DMA_NONE;
6640
6641 c->cmd_type = CMD_IOCTL_PEND;
6642 c->scsi_cmd = SCSI_CMD_BUSY;
6643 c->Header.ReplyQueue = 0;
6644 if (buff != NULL && size > 0) {
6645 c->Header.SGList = 1;
6646 c->Header.SGTotal = cpu_to_le16(1);
6647 } else {
6648 c->Header.SGList = 0;
6649 c->Header.SGTotal = cpu_to_le16(0);
6650 }
6651 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6652
6653 if (cmd_type == TYPE_CMD) {
6654 switch (cmd) {
6655 case HPSA_INQUIRY:
6656 /* are we trying to read a vital product page */
6657 if (page_code & VPD_PAGE) {
6658 c->Request.CDB[1] = 0x01;
6659 c->Request.CDB[2] = (page_code & 0xff);
6660 }
6661 c->Request.CDBLen = 6;
6662 c->Request.type_attr_dir =
6663 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6664 c->Request.Timeout = 0;
6665 c->Request.CDB[0] = HPSA_INQUIRY;
6666 c->Request.CDB[4] = size & 0xFF;
6667 break;
6668 case RECEIVE_DIAGNOSTIC:
6669 c->Request.CDBLen = 6;
6670 c->Request.type_attr_dir =
6671 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6672 c->Request.Timeout = 0;
6673 c->Request.CDB[0] = cmd;
6674 c->Request.CDB[1] = 1;
6675 c->Request.CDB[2] = 1;
6676 c->Request.CDB[3] = (size >> 8) & 0xFF;
6677 c->Request.CDB[4] = size & 0xFF;
6678 break;
6679 case HPSA_REPORT_LOG:
6680 case HPSA_REPORT_PHYS:
6681 /* Talking to the controller, so it's a physical command:
6682 * mode = 00, target = 0. Nothing to write.
6683 */
6684 c->Request.CDBLen = 12;
6685 c->Request.type_attr_dir =
6686 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6687 c->Request.Timeout = 0;
6688 c->Request.CDB[0] = cmd;
6689 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6690 c->Request.CDB[7] = (size >> 16) & 0xFF;
6691 c->Request.CDB[8] = (size >> 8) & 0xFF;
6692 c->Request.CDB[9] = size & 0xFF;
6693 break;
6694 case BMIC_SENSE_DIAG_OPTIONS:
6695 c->Request.CDBLen = 16;
6696 c->Request.type_attr_dir =
6697 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6698 c->Request.Timeout = 0;
6699 /* Spec says this should be BMIC_WRITE */
6700 c->Request.CDB[0] = BMIC_READ;
6701 c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
6702 break;
6703 case BMIC_SET_DIAG_OPTIONS:
6704 c->Request.CDBLen = 16;
6705 c->Request.type_attr_dir =
6706 TYPE_ATTR_DIR(cmd_type,
6707 ATTR_SIMPLE, XFER_WRITE);
6708 c->Request.Timeout = 0;
6709 c->Request.CDB[0] = BMIC_WRITE;
6710 c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
6711 break;
6712 case HPSA_CACHE_FLUSH:
6713 c->Request.CDBLen = 12;
6714 c->Request.type_attr_dir =
6715 TYPE_ATTR_DIR(cmd_type,
6716 ATTR_SIMPLE, XFER_WRITE);
6717 c->Request.Timeout = 0;
6718 c->Request.CDB[0] = BMIC_WRITE;
6719 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6720 c->Request.CDB[7] = (size >> 8) & 0xFF;
6721 c->Request.CDB[8] = size & 0xFF;
6722 break;
6723 case TEST_UNIT_READY:
6724 c->Request.CDBLen = 6;
6725 c->Request.type_attr_dir =
6726 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6727 c->Request.Timeout = 0;
6728 break;
6729 case HPSA_GET_RAID_MAP:
6730 c->Request.CDBLen = 12;
6731 c->Request.type_attr_dir =
6732 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6733 c->Request.Timeout = 0;
6734 c->Request.CDB[0] = HPSA_CISS_READ;
6735 c->Request.CDB[1] = cmd;
6736 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6737 c->Request.CDB[7] = (size >> 16) & 0xFF;
6738 c->Request.CDB[8] = (size >> 8) & 0xFF;
6739 c->Request.CDB[9] = size & 0xFF;
6740 break;
6741 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6742 c->Request.CDBLen = 10;
6743 c->Request.type_attr_dir =
6744 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6745 c->Request.Timeout = 0;
6746 c->Request.CDB[0] = BMIC_READ;
6747 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6748 c->Request.CDB[7] = (size >> 16) & 0xFF;
6749 c->Request.CDB[8] = (size >> 8) & 0xFF;
6750 break;
6751 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6752 c->Request.CDBLen = 10;
6753 c->Request.type_attr_dir =
6754 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6755 c->Request.Timeout = 0;
6756 c->Request.CDB[0] = BMIC_READ;
6757 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6758 c->Request.CDB[7] = (size >> 16) & 0xFF;
6759 c->Request.CDB[8] = (size >> 8) & 0xFF;
6760 break;
6761 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
6762 c->Request.CDBLen = 10;
6763 c->Request.type_attr_dir =
6764 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6765 c->Request.Timeout = 0;
6766 c->Request.CDB[0] = BMIC_READ;
6767 c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
6768 c->Request.CDB[7] = (size >> 16) & 0xFF;
6769 c->Request.CDB[8] = (size >> 8) & 0xFF;
6770 break;
6771 case BMIC_SENSE_STORAGE_BOX_PARAMS:
6772 c->Request.CDBLen = 10;
6773 c->Request.type_attr_dir =
6774 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6775 c->Request.Timeout = 0;
6776 c->Request.CDB[0] = BMIC_READ;
6777 c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
6778 c->Request.CDB[7] = (size >> 16) & 0xFF;
6779 c->Request.CDB[8] = (size >> 8) & 0xFF;
6780 break;
6781 case BMIC_IDENTIFY_CONTROLLER:
6782 c->Request.CDBLen = 10;
6783 c->Request.type_attr_dir =
6784 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6785 c->Request.Timeout = 0;
6786 c->Request.CDB[0] = BMIC_READ;
6787 c->Request.CDB[1] = 0;
6788 c->Request.CDB[2] = 0;
6789 c->Request.CDB[3] = 0;
6790 c->Request.CDB[4] = 0;
6791 c->Request.CDB[5] = 0;
6792 c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
6793 c->Request.CDB[7] = (size >> 16) & 0xFF;
6794 c->Request.CDB[8] = (size >> 8) & 0xFF;
6795 c->Request.CDB[9] = 0;
6796 break;
6797 default:
6798 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
6799 BUG();
6800 }
6801 } else if (cmd_type == TYPE_MSG) {
6802 switch (cmd) {
6803
6804 case HPSA_PHYS_TARGET_RESET:
6805 c->Request.CDBLen = 16;
6806 c->Request.type_attr_dir =
6807 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6808 c->Request.Timeout = 0; /* Don't time out */
6809 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6810 c->Request.CDB[0] = HPSA_RESET;
6811 c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6812 /* Physical target reset needs no control bytes 4-7*/
6813 c->Request.CDB[4] = 0x00;
6814 c->Request.CDB[5] = 0x00;
6815 c->Request.CDB[6] = 0x00;
6816 c->Request.CDB[7] = 0x00;
6817 break;
6818 case HPSA_DEVICE_RESET_MSG:
6819 c->Request.CDBLen = 16;
6820 c->Request.type_attr_dir =
6821 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6822 c->Request.Timeout = 0; /* Don't time out */
6823 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6824 c->Request.CDB[0] = cmd;
6825 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6826 /* If bytes 4-7 are zero, it means reset the */
6827 /* LunID device */
6828 c->Request.CDB[4] = 0x00;
6829 c->Request.CDB[5] = 0x00;
6830 c->Request.CDB[6] = 0x00;
6831 c->Request.CDB[7] = 0x00;
6832 break;
6833 default:
6834 dev_warn(&h->pdev->dev, "unknown message type %d\n",
6835 cmd);
6836 BUG();
6837 }
6838 } else {
6839 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6840 BUG();
6841 }
6842
6843 switch (GET_DIR(c->Request.type_attr_dir)) {
6844 case XFER_READ:
6845 dir = DMA_FROM_DEVICE;
6846 break;
6847 case XFER_WRITE:
6848 dir = DMA_TO_DEVICE;
6849 break;
6850 case XFER_NONE:
6851 dir = DMA_NONE;
6852 break;
6853 default:
6854 dir = DMA_BIDIRECTIONAL;
6855 }
6856 if (hpsa_map_one(h->pdev, c, buff, size, dir))
6857 return -1;
6858 return 0;
6859}
6860
6861/*
6862 * Map (physical) PCI mem into (virtual) kernel space
6863 */
6864static void __iomem *remap_pci_mem(ulong base, ulong size)
6865{
6866 ulong page_base = ((ulong) base) & PAGE_MASK;
6867 ulong page_offs = ((ulong) base) - page_base;
6868 void __iomem *page_remapped = ioremap_nocache(page_base,
6869 page_offs + size);
6870
6871 return page_remapped ? (page_remapped + page_offs) : NULL;
6872}
6873
6874static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6875{
6876 return h->access.command_completed(h, q);
6877}
6878
6879static inline bool interrupt_pending(struct ctlr_info *h)
6880{
6881 return h->access.intr_pending(h);
6882}
6883
6884static inline long interrupt_not_for_us(struct ctlr_info *h)
6885{
6886 return (h->access.intr_pending(h) == 0) ||
6887 (h->interrupts_enabled == 0);
6888}
6889
6890static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6891 u32 raw_tag)
6892{
6893 if (unlikely(tag_index >= h->nr_cmds)) {
6894 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6895 return 1;
6896 }
6897 return 0;
6898}
6899
6900static inline void finish_cmd(struct CommandList *c)
6901{
6902 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6903 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6904 || c->cmd_type == CMD_IOACCEL2))
6905 complete_scsi_command(c);
6906 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6907 complete(c->waiting);
6908}
6909
6910/* process completion of an indexed ("direct lookup") command */
6911static inline void process_indexed_cmd(struct ctlr_info *h,
6912 u32 raw_tag)
6913{
6914 u32 tag_index;
6915 struct CommandList *c;
6916
6917 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6918 if (!bad_tag(h, tag_index, raw_tag)) {
6919 c = h->cmd_pool + tag_index;
6920 finish_cmd(c);
6921 }
6922}
6923
6924/* Some controllers, like the P400, will give us one interrupt
6925 * after a soft reset, even if we turned interrupts off.
6926 * Only need to check for this in the hpsa_xxx_discard_completions
6927 * functions.
6928 */
6929static int ignore_bogus_interrupt(struct ctlr_info *h)
6930{
6931 if (likely(!reset_devices))
6932 return 0;
6933
6934 if (likely(h->interrupts_enabled))
6935 return 0;
6936
6937 dev_info(&h->pdev->dev,
6938 "Received interrupt while interrupts disabled (known firmware bug). Ignoring.\n");
6939
6940 return 1;
6941}
6942
6943/*
6944 * Convert &h->q[x] (passed to interrupt handlers) back to h.
6945 * Relies on (h->q[x] == x) being true for x such that
6946 * 0 <= x < MAX_REPLY_QUEUES.
6947 */
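/* e.g. queue == &h->q[2] gives *queue == 2, so (queue - 2) == &h->q[0]. */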
6948static struct ctlr_info *queue_to_hba(u8 *queue)
6949{
6950 return container_of((queue - *queue), struct ctlr_info, q[0]);
6951}
6952
6953static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6954{
6955 struct ctlr_info *h = queue_to_hba(queue);
6956 u8 q = *(u8 *) queue;
6957 u32 raw_tag;
6958
6959 if (ignore_bogus_interrupt(h))
6960 return IRQ_NONE;
6961
6962 if (interrupt_not_for_us(h))
6963 return IRQ_NONE;
6964 h->last_intr_timestamp = get_jiffies_64();
6965 while (interrupt_pending(h)) {
6966 raw_tag = get_next_completion(h, q);
6967 while (raw_tag != FIFO_EMPTY)
6968 raw_tag = next_command(h, q);
6969 }
6970 return IRQ_HANDLED;
6971}
6972
6973static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
6974{
6975 struct ctlr_info *h = queue_to_hba(queue);
6976 u32 raw_tag;
6977 u8 q = *(u8 *) queue;
6978
6979 if (ignore_bogus_interrupt(h))
6980 return IRQ_NONE;
6981
6982 h->last_intr_timestamp = get_jiffies_64();
6983 raw_tag = get_next_completion(h, q);
6984 while (raw_tag != FIFO_EMPTY)
6985 raw_tag = next_command(h, q);
6986 return IRQ_HANDLED;
6987}
6988
6989static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
6990{
6991 struct ctlr_info *h = queue_to_hba((u8 *) queue);
6992 u32 raw_tag;
6993 u8 q = *(u8 *) queue;
6994
6995 if (interrupt_not_for_us(h))
6996 return IRQ_NONE;
6997 h->last_intr_timestamp = get_jiffies_64();
6998 while (interrupt_pending(h)) {
6999 raw_tag = get_next_completion(h, q);
7000 while (raw_tag != FIFO_EMPTY) {
7001 process_indexed_cmd(h, raw_tag);
7002 raw_tag = next_command(h, q);
7003 }
7004 }
7005 return IRQ_HANDLED;
7006}
7007
7008static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
7009{
7010 struct ctlr_info *h = queue_to_hba(queue);
7011 u32 raw_tag;
7012 u8 q = *(u8 *) queue;
7013
7014 h->last_intr_timestamp = get_jiffies_64();
7015 raw_tag = get_next_completion(h, q);
7016 while (raw_tag != FIFO_EMPTY) {
7017 process_indexed_cmd(h, raw_tag);
7018 raw_tag = next_command(h, q);
7019 }
7020 return IRQ_HANDLED;
7021}
7022
7023/* Send a message CDB to the firmware. Careful, this only works
7024 * in simple mode, not performant mode due to the tag lookup.
7025 * We only ever use this immediately after a controller reset.
7026 */
7027static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
7028 unsigned char type)
7029{
7030 struct Command {
7031 struct CommandListHeader CommandHeader;
7032 struct RequestBlock Request;
7033 struct ErrDescriptor ErrorDescriptor;
7034 };
7035 struct Command *cmd;
7036 static const size_t cmd_sz = sizeof(*cmd) +
7037 sizeof(cmd->ErrorDescriptor);
7038 dma_addr_t paddr64;
7039 __le32 paddr32;
7040 u32 tag;
7041 void __iomem *vaddr;
7042 int i, err;
7043
7044 vaddr = pci_ioremap_bar(pdev, 0);
7045 if (vaddr == NULL)
7046 return -ENOMEM;
7047
7048 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
7049 * CCISS commands, so they must be allocated from the lower 4GiB of
7050 * memory.
7051 */
7052 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7053 if (err) {
7054 iounmap(vaddr);
7055 return err;
7056 }
7057
7058 cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
7059 if (cmd == NULL) {
7060 iounmap(vaddr);
7061 return -ENOMEM;
7062 }
7063
7064 /* This must fit, because of the 32-bit consistent DMA mask. Also,
7065 * although there's no guarantee, we assume that the address is at
7066 * least 4-byte aligned (most likely, it's page-aligned).
7067 */
7068 paddr32 = cpu_to_le32(paddr64);
7069
7070 cmd->CommandHeader.ReplyQueue = 0;
7071 cmd->CommandHeader.SGList = 0;
7072 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
7073 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
7074 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
7075
7076 cmd->Request.CDBLen = 16;
7077 cmd->Request.type_attr_dir =
7078 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
7079 cmd->Request.Timeout = 0; /* Don't time out */
7080 cmd->Request.CDB[0] = opcode;
7081 cmd->Request.CDB[1] = type;
7082 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
7083 cmd->ErrorDescriptor.Addr =
7084 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
7085 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
7086
7087 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
7088
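/* Poll the reply FIFO until our tag (the command's bus address) comes back. */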
7089 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
7090 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
7091 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
7092 break;
7093 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
7094 }
7095
7096 iounmap(vaddr);
7097
7098 /* we leak the DMA buffer here ... no choice since the controller could
7099 * still complete the command.
7100 */
7101 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
7102 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
7103 opcode, type);
7104 return -ETIMEDOUT;
7105 }
7106
7107 dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);
7108
7109 if (tag & HPSA_ERROR_BIT) {
7110 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
7111 opcode, type);
7112 return -EIO;
7113 }
7114
7115 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
7116 opcode, type);
7117 return 0;
7118}
7119
7120#define hpsa_noop(p) hpsa_message(p, 3, 0)
7121
7122static int hpsa_controller_hard_reset(struct pci_dev *pdev,
7123 void __iomem *vaddr, u32 use_doorbell)
7124{
7125
7126 if (use_doorbell) {
7127 /* For everything after the P600, the PCI power state method
7128 * of resetting the controller doesn't work, so we have this
7129 * other way using the doorbell register.
7130 */
7131 dev_info(&pdev->dev, "using doorbell to reset controller\n");
7132 writel(use_doorbell, vaddr + SA5_DOORBELL);
7133
7134 /* PMC hardware guys tell us we need a 10 second delay after
7135 * doorbell reset and before any attempt to talk to the board
7136 * at all to ensure that this actually works and doesn't fall
7137 * over in some weird corner cases.
7138 */
7139 msleep(10000);
7140 } else { /* Try to do it the PCI power state way */
7141
7142 /* Quoting from the Open CISS Specification: "The Power
7143 * Management Control/Status Register (CSR) controls the power
7144 * state of the device. The normal operating state is D0,
7145 * CSR=00h. The software off state is D3, CSR=03h. To reset
7146 * the controller, place the interface device in D3 then to D0,
7147 * this causes a secondary PCI reset which will reset the
7148 * controller." */
7149
7150 int rc = 0;
7151
7152 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
7153
7154 /* enter the D3hot power management state */
7155 rc = pci_set_power_state(pdev, PCI_D3hot);
7156 if (rc)
7157 return rc;
7158
7159 msleep(500);
7160
7161 /* enter the D0 power management state */
7162 rc = pci_set_power_state(pdev, PCI_D0);
7163 if (rc)
7164 return rc;
7165
7166 /*
7167 * The P600 requires a small delay when changing states.
7168 * Otherwise we may think the board did not reset and we bail.
7169 * This is for kdump only and is particular to the P600.
7170 */
7171 msleep(500);
7172 }
7173 return 0;
7174}
7175
7176static void init_driver_version(char *driver_version, int len)
7177{
7178 memset(driver_version, 0, len);
7179 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
7180}
7181
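/*
 * The driver_version bytes written here should be changed by a
 * successful controller reset; controller_reset_failed() keys off
 * that to detect a reset that didn't take effect.
 */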
7182static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
7183{
7184 char *driver_version;
7185 int i, size = sizeof(cfgtable->driver_version);
7186
7187 driver_version = kmalloc(size, GFP_KERNEL);
7188 if (!driver_version)
7189 return -ENOMEM;
7190
7191 init_driver_version(driver_version, size);
7192 for (i = 0; i < size; i++)
7193 writeb(driver_version[i], &cfgtable->driver_version[i]);
7194 kfree(driver_version);
7195 return 0;
7196}
7197
7198static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
7199 unsigned char *driver_ver)
7200{
7201 int i;
7202
7203 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
7204 driver_ver[i] = readb(&cfgtable->driver_version[i]);
7205}
7206
7207static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
7208{
7209
7210 char *driver_ver, *old_driver_ver;
7211 int rc, size = sizeof(cfgtable->driver_version);
7212
7213 old_driver_ver = kmalloc_array(2, size, GFP_KERNEL);
7214 if (!old_driver_ver)
7215 return -ENOMEM;
7216 driver_ver = old_driver_ver + size;
7217
7218 /* After a reset, the 32 bytes of "driver version" in the cfgtable
7219 * should have been changed, otherwise we know the reset failed.
7220 */
7221 init_driver_version(old_driver_ver, size);
7222 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
7223 rc = !memcmp(driver_ver, old_driver_ver, size);
7224 kfree(old_driver_ver);
7225 return rc;
7226}
7227/* This does a hard reset of the controller using PCI power management
7228 * states or the doorbell register.
7229 */
7230static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
7231{
7232 u64 cfg_offset;
7233 u32 cfg_base_addr;
7234 u64 cfg_base_addr_index;
7235 void __iomem *vaddr;
7236 unsigned long paddr;
7237 u32 misc_fw_support;
7238 int rc;
7239 struct CfgTable __iomem *cfgtable;
7240 u32 use_doorbell;
7241 u16 command_register;
7242
7243 /* For controllers as old as the P600, this is very nearly
7244 * the same thing as
7245 *
7246 * pci_save_state(pci_dev);
7247 * pci_set_power_state(pci_dev, PCI_D3hot);
7248 * pci_set_power_state(pci_dev, PCI_D0);
7249 * pci_restore_state(pci_dev);
7250 *
7251 * For controllers newer than the P600, the pci power state
7252 * method of resetting doesn't work so we have another way
7253 * using the doorbell register.
7254 */
7255
7256 if (!ctlr_is_resettable(board_id)) {
7257 dev_warn(&pdev->dev, "Controller not resettable\n");
7258 return -ENODEV;
7259 }
7260
7261 /* if controller is soft- but not hard resettable... */
7262 if (!ctlr_is_hard_resettable(board_id))
7263 return -ENOTSUPP; /* try soft reset later. */
7264
7265 /* Save the PCI command register */
7266 pci_read_config_word(pdev, 4, &command_register);
7267 pci_save_state(pdev);
7268
7269 /* find the first memory BAR, so we can find the cfg table */
7270 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
7271 if (rc)
7272 return rc;
7273 vaddr = remap_pci_mem(paddr, 0x250);
7274 if (!vaddr)
7275 return -ENOMEM;
7276
7277 /* find cfgtable in order to check if reset via doorbell is supported */
7278 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
7279 &cfg_base_addr_index, &cfg_offset);
7280 if (rc)
7281 goto unmap_vaddr;
7282 cfgtable = remap_pci_mem(pci_resource_start(pdev,
7283 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
7284 if (!cfgtable) {
7285 rc = -ENOMEM;
7286 goto unmap_vaddr;
7287 }
7288 rc = write_driver_ver_to_cfgtable(cfgtable);
7289 if (rc)
7290 goto unmap_cfgtable;
7291
7292 /* If reset via doorbell register is supported, use that.
7293 * There are two such methods. Favor the newest method.
7294 */
7295 misc_fw_support = readl(&cfgtable->misc_fw_support);
7296 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
7297 if (use_doorbell) {
7298 use_doorbell = DOORBELL_CTLR_RESET2;
7299 } else {
7300 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
7301 if (use_doorbell) {
7302 dev_warn(&pdev->dev,
7303 "Soft reset not supported. Firmware update is required.\n");
7304 rc = -ENOTSUPP; /* try soft reset */
7305 goto unmap_cfgtable;
7306 }
7307 }
7308
7309 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
7310 if (rc)
7311 goto unmap_cfgtable;
7312
7313 pci_restore_state(pdev);
7314 pci_write_config_word(pdev, 4, command_register);
7315
7316 /* Some devices (notably the HP Smart Array 5i Controller)
7317	 * need a little pause here */
7318 msleep(HPSA_POST_RESET_PAUSE_MSECS);
7319
7320 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
7321 if (rc) {
7322 dev_warn(&pdev->dev,
7323 "Failed waiting for board to become ready after hard reset\n");
7324 goto unmap_cfgtable;
7325 }
7326
7327	rc = controller_reset_failed(cfgtable);
7328 if (rc < 0)
7329 goto unmap_cfgtable;
7330 if (rc) {
7331 dev_warn(&pdev->dev, "Unable to successfully reset "
7332 "controller. Will try soft reset.\n");
7333 rc = -ENOTSUPP;
7334 } else {
7335 dev_info(&pdev->dev, "board ready after hard reset.\n");
7336 }
7337
7338unmap_cfgtable:
7339 iounmap(cfgtable);
7340
7341unmap_vaddr:
7342 iounmap(vaddr);
7343 return rc;
7344}
7345
7346/*
7347 * We cannot read the structure directly; for portability we must use
7348 * the I/O accessor functions.
7349 * This is for debug only.
7350 */
7351static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
7352{
7353#ifdef HPSA_DEBUG
7354 int i;
7355 char temp_name[17];
7356
7357 dev_info(dev, "Controller Configuration information\n");
7358 dev_info(dev, "------------------------------------\n");
7359 for (i = 0; i < 4; i++)
7360 temp_name[i] = readb(&(tb->Signature[i]));
7361 temp_name[4] = '\0';
7362 dev_info(dev, " Signature = %s\n", temp_name);
7363 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
7364 dev_info(dev, " Transport methods supported = 0x%x\n",
7365 readl(&(tb->TransportSupport)));
7366 dev_info(dev, " Transport methods active = 0x%x\n",
7367 readl(&(tb->TransportActive)));
7368 dev_info(dev, " Requested transport Method = 0x%x\n",
7369 readl(&(tb->HostWrite.TransportRequest)));
7370 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
7371 readl(&(tb->HostWrite.CoalIntDelay)));
7372 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
7373 readl(&(tb->HostWrite.CoalIntCount)));
7374 dev_info(dev, " Max outstanding commands = %d\n",
7375 readl(&(tb->CmdsOutMax)));
7376 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7377 for (i = 0; i < 16; i++)
7378 temp_name[i] = readb(&(tb->ServerName[i]));
7379 temp_name[16] = '\0';
7380 dev_info(dev, " Server Name = %s\n", temp_name);
7381 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
7382 readl(&(tb->HeartBeat)));
7383#endif /* HPSA_DEBUG */
7384}
7385
7386static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7387{
7388 int i, offset, mem_type, bar_type;
7389
7390 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
7391 return 0;
7392 offset = 0;
7393 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7394 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7395 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7396 offset += 4;
7397 else {
7398 mem_type = pci_resource_flags(pdev, i) &
7399 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7400 switch (mem_type) {
7401 case PCI_BASE_ADDRESS_MEM_TYPE_32:
7402 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7403 offset += 4; /* 32 bit */
7404 break;
7405 case PCI_BASE_ADDRESS_MEM_TYPE_64:
7406 offset += 8;
7407 break;
7408			default:
7409				/* reserved in PCI 2.2 */
7410				dev_warn(&pdev->dev,
7411					"base address is invalid\n");
7412				return -1;
7413 }
7414 }
7415 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7416 return i + 1;
7417 }
7418 return -1;
7419}
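/*
 * Worked example (illustrative): this helper maps a BAR register offset in
 * PCI config space to a Linux resource index.  If resources 0 and 1 are
 * both 32-bit memory BARs, a lookup of PCI_BASE_ADDRESS_1
 * (pci_bar_addr - PCI_BASE_ADDRESS_0 == 4) accumulates offset = 4 after
 * resource 0 and returns resource index 1; a 64-bit memory BAR advances
 * the offset by 8 instead of 4.
 */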
7420
7421static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7422{
7423 pci_free_irq_vectors(h->pdev);
7424 h->msix_vectors = 0;
7425}
7426
7427static void hpsa_setup_reply_map(struct ctlr_info *h)
7428{
7429 const struct cpumask *mask;
7430 unsigned int queue, cpu;
7431
7432 for (queue = 0; queue < h->msix_vectors; queue++) {
7433 mask = pci_irq_get_affinity(h->pdev, queue);
7434 if (!mask)
7435 goto fallback;
7436
7437 for_each_cpu(cpu, mask)
7438 h->reply_map[cpu] = queue;
7439 }
7440 return;
7441
7442fallback:
7443 for_each_possible_cpu(cpu)
7444 h->reply_map[cpu] = 0;
7445}
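/*
 * Illustrative sketch (not verbatim driver code): the submission path can
 * pick the reply queue for the current CPU with something like
 *
 *	queue = h->reply_map[raw_smp_processor_id()];
 *
 * so completions arrive on a vector whose affinity mask includes the
 * submitting CPU; the fallback above simply routes every CPU to queue 0.
 */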
7446
7447/* If MSI/MSI-X is supported by the kernel we will try to enable it on
7448 * controllers that are capable. If not, we use legacy INTx mode.
7449 */
7450static int hpsa_interrupt_mode(struct ctlr_info *h)
7451{
7452 unsigned int flags = PCI_IRQ_LEGACY;
7453 int ret;
7454
7455 /* Some boards advertise MSI but don't really support it */
7456 switch (h->board_id) {
7457 case 0x40700E11:
7458 case 0x40800E11:
7459 case 0x40820E11:
7460 case 0x40830E11:
7461 break;
7462 default:
7463 ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
7464 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
7465 if (ret > 0) {
7466 h->msix_vectors = ret;
7467 return 0;
7468 }
7469
7470 flags |= PCI_IRQ_MSI;
7471 break;
7472 }
7473
7474 ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
7475 if (ret < 0)
7476 return ret;
7477 return 0;
7478}
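/*
 * Summary of the outcomes above: boards with the known MSI quirk go
 * straight to a single legacy INTx vector; everything else first tries up
 * to MAX_REPLY_QUEUES affinity-spread MSI-X vectors (h->msix_vectors > 0
 * on success) and otherwise falls back to one MSI vector, or INTx if even
 * that fails.
 */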
7479
7480static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
7481 bool *legacy_board)
7482{
7483 int i;
7484 u32 subsystem_vendor_id, subsystem_device_id;
7485
7486 subsystem_vendor_id = pdev->subsystem_vendor;
7487 subsystem_device_id = pdev->subsystem_device;
7488 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7489 subsystem_vendor_id;
7490
7491 if (legacy_board)
7492 *legacy_board = false;
7493 for (i = 0; i < ARRAY_SIZE(products); i++)
7494 if (*board_id == products[i].board_id) {
7495 if (products[i].access != &SA5A_access &&
7496 products[i].access != &SA5B_access)
7497 return i;
7498 dev_warn(&pdev->dev,
7499 "legacy board ID: 0x%08x\n",
7500 *board_id);
7501 if (legacy_board)
7502 *legacy_board = true;
7503 return i;
7504 }
7505
7506 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id);
7507 if (legacy_board)
7508 *legacy_board = true;
7509 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
7510}
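/*
 * Illustrative example (hypothetical IDs): a controller with subsystem
 * vendor 0x103C and subsystem device 0x3354 yields
 * board_id = (0x3354 << 16) | 0x103C = 0x3354103C, which is then matched
 * against the products[] table.
 */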
7511
7512static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7513 unsigned long *memory_bar)
7514{
7515 int i;
7516
7517 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7518 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7519 /* addressing mode bits already removed */
7520 *memory_bar = pci_resource_start(pdev, i);
7521 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7522 *memory_bar);
7523 return 0;
7524 }
7525 dev_warn(&pdev->dev, "no memory BAR found\n");
7526 return -ENODEV;
7527}
7528
7529static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7530 int wait_for_ready)
7531{
7532 int i, iterations;
7533 u32 scratchpad;
7534 if (wait_for_ready)
7535 iterations = HPSA_BOARD_READY_ITERATIONS;
7536 else
7537 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
7538
7539 for (i = 0; i < iterations; i++) {
7540 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7541 if (wait_for_ready) {
7542 if (scratchpad == HPSA_FIRMWARE_READY)
7543 return 0;
7544 } else {
7545 if (scratchpad != HPSA_FIRMWARE_READY)
7546 return 0;
7547 }
7548 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7549 }
7550 dev_warn(&pdev->dev, "board not ready, timed out.\n");
7551 return -ENODEV;
7552}
7553
7554static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7555 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7556 u64 *cfg_offset)
7557{
7558 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7559 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7560 *cfg_base_addr &= (u32) 0x0000ffff;
7561 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7562 if (*cfg_base_addr_index == -1) {
7563 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7564 return -ENODEV;
7565 }
7566 return 0;
7567}
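/*
 * Example of the two-step lookup above (illustrative values): if
 * SA5_CTCFG reads 0x14 and SA5_CTMEM reads 0x1000, the config table lives
 * at byte offset 0x1000 within the BAR whose config-space register is at
 * offset 0x14, with the resource index resolved via find_PCI_BAR_index().
 */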
7568
7569static void hpsa_free_cfgtables(struct ctlr_info *h)
7570{
7571 if (h->transtable) {
7572 iounmap(h->transtable);
7573 h->transtable = NULL;
7574 }
7575 if (h->cfgtable) {
7576 iounmap(h->cfgtable);
7577 h->cfgtable = NULL;
7578 }
7579}
7580
7581/* Find and map CISS config table and transfer table
7582 * several items must be unmapped (freed) later
7583 */
7584static int hpsa_find_cfgtables(struct ctlr_info *h)
7585{
7586 u64 cfg_offset;
7587 u32 cfg_base_addr;
7588 u64 cfg_base_addr_index;
7589 u32 trans_offset;
7590 int rc;
7591
7592 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7593 &cfg_base_addr_index, &cfg_offset);
7594 if (rc)
7595 return rc;
7596 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7597 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7598 if (!h->cfgtable) {
7599 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7600 return -ENOMEM;
7601 }
7602 rc = write_driver_ver_to_cfgtable(h->cfgtable);
7603 if (rc)
7604 return rc;
7605 /* Find performant mode table. */
7606 trans_offset = readl(&h->cfgtable->TransMethodOffset);
7607 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7608 cfg_base_addr_index)+cfg_offset+trans_offset,
7609 sizeof(*h->transtable));
7610 if (!h->transtable) {
7611 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7612 hpsa_free_cfgtables(h);
7613 return -ENOMEM;
7614 }
7615 return 0;
7616}
7617
7618static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7619{
7620#define MIN_MAX_COMMANDS 16
7621 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7622
7623 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7624
7625 /* Limit commands in memory limited kdump scenario. */
7626 if (reset_devices && h->max_commands > 32)
7627 h->max_commands = 32;
7628
7629 if (h->max_commands < MIN_MAX_COMMANDS) {
7630 dev_warn(&h->pdev->dev,
7631			"Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
7632 h->max_commands,
7633 MIN_MAX_COMMANDS);
7634 h->max_commands = MIN_MAX_COMMANDS;
7635 }
7636}
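/*
 * Example of the clamping above (illustrative numbers): a controller
 * advertising 1008 performant-mode commands is clamped to 32 when
 * reset_devices is set (kdump, where memory is tight); one advertising
 * fewer than MIN_MAX_COMMANDS (16) is bumped up to 16 with a firmware
 * warning.
 */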
7637
7638/* If the controller reports that the total max sg entries is greater than 512,
7639 * then we know that chained SG blocks work. (Original smart arrays did not
7640 * support chained SG blocks and would return zero for max sg entries.)
7641 */
7642static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7643{
7644 return h->maxsgentries > 512;
7645}
7646
7647/* Interrogate the hardware for some limits:
7648 * max commands, max SG elements without chaining, and with chaining,
7649 * SG chain block size, etc.
7650 */
7651static void hpsa_find_board_params(struct ctlr_info *h)
7652{
7653 hpsa_get_max_perf_mode_cmds(h);
7654 h->nr_cmds = h->max_commands;
7655 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7656 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7657 if (hpsa_supports_chained_sg_blocks(h)) {
7658		/* Limit in-command s/g elements to 32 to save dma'able memory. */
7659 h->max_cmd_sg_entries = 32;
7660 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
7661 h->maxsgentries--; /* save one for chain pointer */
7662 } else {
7663 /*
7664 * Original smart arrays supported at most 31 s/g entries
7665 * embedded inline in the command (trying to use more
7666 * would lock up the controller)
7667 */
7668 h->max_cmd_sg_entries = 31;
7669 h->maxsgentries = 31; /* default to traditional values */
7670 h->chainsize = 0;
7671 }
7672
7673 /* Find out what task management functions are supported and cache */
7674 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7675 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7676 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7677 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7678 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7679 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7680 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7681}
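/*
 * Worked example of the arithmetic above: a controller reporting 1024 SG
 * entries (> 512, so chaining works) ends up with max_cmd_sg_entries = 32
 * embedded entries, chainsize = 1024 - 32 = 992 entries per chain block,
 * and maxsgentries = 1023 after reserving one slot for the chain pointer.
 */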
7682
7683static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7684{
7685 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7686 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7687 return false;
7688 }
7689 return true;
7690}
7691
7692static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7693{
7694 u32 driver_support;
7695
7696 driver_support = readl(&(h->cfgtable->driver_support));
7697	/* Need to enable prefetch in the SCSI core for the 6400 on x86 */
7698#ifdef CONFIG_X86
7699 driver_support |= ENABLE_SCSI_PREFETCH;
7700#endif
7701 driver_support |= ENABLE_UNIT_ATTN;
7702 writel(driver_support, &(h->cfgtable->driver_support));
7703}
7704
7705/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
7706 * in a prefetch beyond physical memory.
7707 */
7708static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7709{
7710 u32 dma_prefetch;
7711
7712 if (h->board_id != 0x3225103C)
7713 return;
7714 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7715 dma_prefetch |= 0x8000;
7716 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7717}
7718
7719static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7720{
7721 int i;
7722 u32 doorbell_value;
7723 unsigned long flags;
7724 /* wait until the clear_event_notify bit 6 is cleared by controller. */
7725 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7726 spin_lock_irqsave(&h->lock, flags);
7727 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7728 spin_unlock_irqrestore(&h->lock, flags);
7729 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7730 goto done;
7731 /* delay and try again */
7732 msleep(CLEAR_EVENT_WAIT_INTERVAL);
7733 }
7734 return -ENODEV;
7735done:
7736 return 0;
7737}
7738
7739static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7740{
7741 int i;
7742 u32 doorbell_value;
7743 unsigned long flags;
7744
7745	/* under certain very rare conditions, this can take a while.
7746	 * (e.g., hot-replacing a failed 144GB drive in a RAID 5 set right
7747 * as we enter this code.)
7748 */
7749 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7750 if (h->remove_in_progress)
7751 goto done;
7752 spin_lock_irqsave(&h->lock, flags);
7753 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7754 spin_unlock_irqrestore(&h->lock, flags);
7755 if (!(doorbell_value & CFGTBL_ChangeReq))
7756 goto done;
7757 /* delay and try again */
7758 msleep(MODE_CHANGE_WAIT_INTERVAL);
7759 }
7760 return -ENODEV;
7761done:
7762 return 0;
7763}
7764
7765/* return -ENODEV or other reason on error, 0 on success */
7766static int hpsa_enter_simple_mode(struct ctlr_info *h)
7767{
7768 u32 trans_support;
7769
7770 trans_support = readl(&(h->cfgtable->TransportSupport));
7771 if (!(trans_support & SIMPLE_MODE))
7772 return -ENOTSUPP;
7773
7774 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7775
7776 /* Update the field, and then ring the doorbell */
7777 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7778 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7779 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7780 if (hpsa_wait_for_mode_change_ack(h))
7781 goto error;
7782 print_cfg_table(&h->pdev->dev, h->cfgtable);
7783 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7784 goto error;
7785 h->transMethod = CFGTBL_Trans_Simple;
7786 return 0;
7787error:
7788 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
7789 return -ENODEV;
7790}
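/*
 * The mode-change handshake used above (and again when entering
 * performant mode) is: write the requested method to
 * HostWrite.TransportRequest, ring CFGTBL_ChangeReq on the SA5 doorbell,
 * poll until the controller clears the doorbell bit, then confirm the
 * method shows up in TransportActive.
 */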
7791
7792/* free items allocated or mapped by hpsa_pci_init */
7793static void hpsa_free_pci_init(struct ctlr_info *h)
7794{
7795 hpsa_free_cfgtables(h); /* pci_init 4 */
7796 iounmap(h->vaddr); /* pci_init 3 */
7797 h->vaddr = NULL;
7798 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
7799 /*
7800 * call pci_disable_device before pci_release_regions per
7801 * Documentation/driver-api/pci/pci.rst
7802 */
7803 pci_disable_device(h->pdev); /* pci_init 1 */
7804 pci_release_regions(h->pdev); /* pci_init 2 */
7805}
7806
7807/* several items must be freed later */
7808static int hpsa_pci_init(struct ctlr_info *h)
7809{
7810 int prod_index, err;
7811 bool legacy_board;
7812
7813 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board);
7814 if (prod_index < 0)
7815 return prod_index;
7816 h->product_name = products[prod_index].product_name;
7817 h->access = *(products[prod_index].access);
7818 h->legacy_board = legacy_board;
7819 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7820 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7821
7822 err = pci_enable_device(h->pdev);
7823 if (err) {
7824 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7825 pci_disable_device(h->pdev);
7826 return err;
7827 }
7828
7829 err = pci_request_regions(h->pdev, HPSA);
7830 if (err) {
7831 dev_err(&h->pdev->dev,
7832 "failed to obtain PCI resources\n");
7833 pci_disable_device(h->pdev);
7834 return err;
7835 }
7836
7837 pci_set_master(h->pdev);
7838
7839 err = hpsa_interrupt_mode(h);
7840 if (err)
7841 goto clean1;
7842
7843 /* setup mapping between CPU and reply queue */
7844 hpsa_setup_reply_map(h);
7845
7846 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7847 if (err)
7848 goto clean2; /* intmode+region, pci */
7849 h->vaddr = remap_pci_mem(h->paddr, 0x250);
7850 if (!h->vaddr) {
7851 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7852 err = -ENOMEM;
7853 goto clean2; /* intmode+region, pci */
7854 }
7855 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7856 if (err)
7857 goto clean3; /* vaddr, intmode+region, pci */
7858 err = hpsa_find_cfgtables(h);
7859 if (err)
7860 goto clean3; /* vaddr, intmode+region, pci */
7861 hpsa_find_board_params(h);
7862
7863 if (!hpsa_CISS_signature_present(h)) {
7864 err = -ENODEV;
7865 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
7866 }
7867 hpsa_set_driver_support_bits(h);
7868 hpsa_p600_dma_prefetch_quirk(h);
7869 err = hpsa_enter_simple_mode(h);
7870 if (err)
7871 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
7872 return 0;
7873
7874clean4: /* cfgtables, vaddr, intmode+region, pci */
7875 hpsa_free_cfgtables(h);
7876clean3: /* vaddr, intmode+region, pci */
7877 iounmap(h->vaddr);
7878 h->vaddr = NULL;
7879clean2: /* intmode+region, pci */
7880 hpsa_disable_interrupt_mode(h);
7881clean1:
7882 /*
7883 * call pci_disable_device before pci_release_regions per
7884 * Documentation/driver-api/pci/pci.rst
7885 */
7886 pci_disable_device(h->pdev);
7887 pci_release_regions(h->pdev);
7888 return err;
7889}
7890
7891static void hpsa_hba_inquiry(struct ctlr_info *h)
7892{
7893 int rc;
7894
7895#define HBA_INQUIRY_BYTE_COUNT 64
7896 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7897 if (!h->hba_inquiry_data)
7898 return;
7899 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7900 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7901 if (rc != 0) {
7902 kfree(h->hba_inquiry_data);
7903 h->hba_inquiry_data = NULL;
7904 }
7905}
7906
7907static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7908{
7909 int rc, i;
7910 void __iomem *vaddr;
7911
7912 if (!reset_devices)
7913 return 0;
7914
7915	/* The kdump kernel is loading; we don't know what state the
7916	 * PCI interface is in. dev->enable_cnt is zero, so we call
7917	 * enable+disable, wait a while, and then switch it back on.
7918 */
7919 rc = pci_enable_device(pdev);
7920 if (rc) {
7921 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7922 return -ENODEV;
7923 }
7924 pci_disable_device(pdev);
7925 msleep(260); /* a randomly chosen number */
7926 rc = pci_enable_device(pdev);
7927 if (rc) {
7928 dev_warn(&pdev->dev, "failed to enable device.\n");
7929 return -ENODEV;
7930 }
7931
7932 pci_set_master(pdev);
7933
7934 vaddr = pci_ioremap_bar(pdev, 0);
7935 if (vaddr == NULL) {
7936 rc = -ENOMEM;
7937 goto out_disable;
7938 }
7939 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7940 iounmap(vaddr);
7941
7942 /* Reset the controller with a PCI power-cycle or via doorbell */
7943 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
7944
7945 /* -ENOTSUPP here means we cannot reset the controller
7946 * but it's already (and still) up and running in
7947	 * "performant mode". Or, it might be a 640x, which can't reset
7948	 * due to concerns about shared BBWC between the 6402/6404 pair.
7949 */
7950 if (rc)
7951 goto out_disable;
7952
7953 /* Now try to get the controller to respond to a no-op */
7954 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
7955 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7956 if (hpsa_noop(pdev) == 0)
7957 break;
7958 else
7959 dev_warn(&pdev->dev, "no-op failed%s\n",
7960				(i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
7961 }
7962
7963out_disable:
7964
7965 pci_disable_device(pdev);
7966 return rc;
7967}
7968
7969static void hpsa_free_cmd_pool(struct ctlr_info *h)
7970{
7971 kfree(h->cmd_pool_bits);
7972 h->cmd_pool_bits = NULL;
7973 if (h->cmd_pool) {
7974 dma_free_coherent(&h->pdev->dev,
7975 h->nr_cmds * sizeof(struct CommandList),
7976 h->cmd_pool,
7977 h->cmd_pool_dhandle);
7978 h->cmd_pool = NULL;
7979 h->cmd_pool_dhandle = 0;
7980 }
7981 if (h->errinfo_pool) {
7982 dma_free_coherent(&h->pdev->dev,
7983 h->nr_cmds * sizeof(struct ErrorInfo),
7984 h->errinfo_pool,
7985 h->errinfo_pool_dhandle);
7986 h->errinfo_pool = NULL;
7987 h->errinfo_pool_dhandle = 0;
7988 }
7989}
7990
7991static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
7992{
7993 h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
7994 sizeof(unsigned long),
7995 GFP_KERNEL);
7996 h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
7997 h->nr_cmds * sizeof(*h->cmd_pool),
7998 &h->cmd_pool_dhandle, GFP_KERNEL);
7999 h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
8000 h->nr_cmds * sizeof(*h->errinfo_pool),
8001 &h->errinfo_pool_dhandle, GFP_KERNEL);
8002 if ((h->cmd_pool_bits == NULL)
8003 || (h->cmd_pool == NULL)
8004 || (h->errinfo_pool == NULL)) {
8005		dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
8006 goto clean_up;
8007 }
8008 hpsa_preinitialize_commands(h);
8009 return 0;
8010clean_up:
8011 hpsa_free_cmd_pool(h);
8012 return -ENOMEM;
8013}
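/*
 * Sizing sketch for the allocations above (illustrative): with
 * h->nr_cmds = 1024 on a 64-bit kernel, cmd_pool_bits needs
 * DIV_ROUND_UP(1024, 64) = 16 unsigned longs for the allocation bitmap,
 * while cmd_pool and errinfo_pool are single DMA-coherent arrays of 1024
 * CommandList and ErrorInfo entries respectively.
 */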
8014
8015/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
8016static void hpsa_free_irqs(struct ctlr_info *h)
8017{
8018 int i;
8019 int irq_vector = 0;
8020
8021 if (hpsa_simple_mode)
8022 irq_vector = h->intr_mode;
8023
8024 if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
8025 /* Single reply queue, only one irq to free */
8026 free_irq(pci_irq_vector(h->pdev, irq_vector),
8027 &h->q[h->intr_mode]);
8028 h->q[h->intr_mode] = 0;
8029 return;
8030 }
8031
8032 for (i = 0; i < h->msix_vectors; i++) {
8033 free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
8034 h->q[i] = 0;
8035 }
8036 for (; i < MAX_REPLY_QUEUES; i++)
8037 h->q[i] = 0;
8038}
8039
8040/* returns 0 on success; cleans up and returns -Enn on error */
8041static int hpsa_request_irqs(struct ctlr_info *h,
8042 irqreturn_t (*msixhandler)(int, void *),
8043 irqreturn_t (*intxhandler)(int, void *))
8044{
8045 int rc, i;
8046 int irq_vector = 0;
8047
8048 if (hpsa_simple_mode)
8049 irq_vector = h->intr_mode;
8050
8051 /*
8052 * initialize h->q[x] = x so that interrupt handlers know which
8053 * queue to process.
8054 */
8055 for (i = 0; i < MAX_REPLY_QUEUES; i++)
8056 h->q[i] = (u8) i;
8057
8058 if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
8059 /* If performant mode and MSI-X, use multiple reply queues */
8060 for (i = 0; i < h->msix_vectors; i++) {
8061 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
8062 rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
8063 0, h->intrname[i],
8064 &h->q[i]);
8065 if (rc) {
8066 int j;
8067
8068 dev_err(&h->pdev->dev,
8069 "failed to get irq %d for %s\n",
8070 pci_irq_vector(h->pdev, i), h->devname);
8071 for (j = 0; j < i; j++) {
8072 free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
8073 h->q[j] = 0;
8074 }
8075 for (; j < MAX_REPLY_QUEUES; j++)
8076 h->q[j] = 0;
8077 return rc;
8078 }
8079 }
8080 } else {
8081 /* Use single reply pool */
8082 if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
8083 sprintf(h->intrname[0], "%s-msi%s", h->devname,
8084 h->msix_vectors ? "x" : "");
8085 rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
8086 msixhandler, 0,
8087 h->intrname[0],
8088 &h->q[h->intr_mode]);
8089 } else {
8090			sprintf(h->intrname[0],
8091				"%s-intx", h->devname);
8092 rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
8093 intxhandler, IRQF_SHARED,
8094 h->intrname[0],
8095 &h->q[h->intr_mode]);
8096 }
8097 }
8098 if (rc) {
8099 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
8100 pci_irq_vector(h->pdev, irq_vector), h->devname);
8101 hpsa_free_irqs(h);
8102 return -ENODEV;
8103 }
8104 return 0;
8105}
8106
8107static int hpsa_kdump_soft_reset(struct ctlr_info *h)
8108{
8109 int rc;
8110 hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER);
8111
8112 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
8113 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
8114 if (rc) {
8115 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
8116 return rc;
8117 }
8118
8119 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
8120 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
8121 if (rc) {
8122 dev_warn(&h->pdev->dev, "Board failed to become ready "
8123 "after soft reset.\n");
8124 return rc;
8125 }
8126
8127 return 0;
8128}
8129
8130static void hpsa_free_reply_queues(struct ctlr_info *h)
8131{
8132 int i;
8133
8134 for (i = 0; i < h->nreply_queues; i++) {
8135 if (!h->reply_queue[i].head)
8136 continue;
8137 dma_free_coherent(&h->pdev->dev,
8138 h->reply_queue_size,
8139 h->reply_queue[i].head,
8140 h->reply_queue[i].busaddr);
8141 h->reply_queue[i].head = NULL;
8142 h->reply_queue[i].busaddr = 0;
8143 }
8144 h->reply_queue_size = 0;
8145}
8146
8147static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
8148{
8149 hpsa_free_performant_mode(h); /* init_one 7 */
8150 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
8151 hpsa_free_cmd_pool(h); /* init_one 5 */
8152 hpsa_free_irqs(h); /* init_one 4 */
8153 scsi_host_put(h->scsi_host); /* init_one 3 */
8154 h->scsi_host = NULL; /* init_one 3 */
8155 hpsa_free_pci_init(h); /* init_one 2_5 */
8156 free_percpu(h->lockup_detected); /* init_one 2 */
8157 h->lockup_detected = NULL; /* init_one 2 */
8158 if (h->resubmit_wq) {
8159 destroy_workqueue(h->resubmit_wq); /* init_one 1 */
8160 h->resubmit_wq = NULL;
8161 }
8162 if (h->rescan_ctlr_wq) {
8163 destroy_workqueue(h->rescan_ctlr_wq);
8164 h->rescan_ctlr_wq = NULL;
8165 }
8166 if (h->monitor_ctlr_wq) {
8167 destroy_workqueue(h->monitor_ctlr_wq);
8168 h->monitor_ctlr_wq = NULL;
8169 }
8170
8171 kfree(h); /* init_one 1 */
8172}
8173
8174/* Called when controller lockup detected. */
8175static void fail_all_outstanding_cmds(struct ctlr_info *h)
8176{
8177 int i, refcount;
8178 struct CommandList *c;
8179 int failcount = 0;
8180
8181 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
8182 for (i = 0; i < h->nr_cmds; i++) {
8183 c = h->cmd_pool + i;
8184 refcount = atomic_inc_return(&c->refcount);
8185 if (refcount > 1) {
8186 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
8187 finish_cmd(c);
8188 atomic_dec(&h->commands_outstanding);
8189 failcount++;
8190 }
8191 cmd_free(h, c);
8192 }
8193 dev_warn(&h->pdev->dev,
8194 "failed %d commands in fail_all\n", failcount);
8195}
8196
8197static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
8198{
8199 int cpu;
8200
8201 for_each_online_cpu(cpu) {
8202 u32 *lockup_detected;
8203 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
8204 *lockup_detected = value;
8205 }
8206 wmb(); /* be sure the per-cpu variables are out to memory */
8207}
8208
8209static void controller_lockup_detected(struct ctlr_info *h)
8210{
8211 unsigned long flags;
8212 u32 lockup_detected;
8213
8214 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8215 spin_lock_irqsave(&h->lock, flags);
8216 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
8217 if (!lockup_detected) {
8218 /* no heartbeat, but controller gave us a zero. */
8219 dev_warn(&h->pdev->dev,
8220			"lockup detected after %d seconds but scratchpad register is zero\n",
8221 h->heartbeat_sample_interval / HZ);
8222 lockup_detected = 0xffffffff;
8223 }
8224 set_lockup_detected_for_all_cpus(h, lockup_detected);
8225 spin_unlock_irqrestore(&h->lock, flags);
8226	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
8227 lockup_detected, h->heartbeat_sample_interval / HZ);
8228 if (lockup_detected == 0xffff0000) {
8229 dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
8230 writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
8231 }
8232 pci_disable_device(h->pdev);
8233 fail_all_outstanding_cmds(h);
8234}
8235
8236static int detect_controller_lockup(struct ctlr_info *h)
8237{
8238 u64 now;
8239 u32 heartbeat;
8240 unsigned long flags;
8241
8242 now = get_jiffies_64();
8243 /* If we've received an interrupt recently, we're ok. */
8244 if (time_after64(h->last_intr_timestamp +
8245 (h->heartbeat_sample_interval), now))
8246 return false;
8247
8248 /*
8249 * If we've already checked the heartbeat recently, we're ok.
8250 * This could happen if someone sends us a signal. We
8251 * otherwise don't care about signals in this thread.
8252 */
8253 if (time_after64(h->last_heartbeat_timestamp +
8254 (h->heartbeat_sample_interval), now))
8255 return false;
8256
8257 /* If heartbeat has not changed since we last looked, we're not ok. */
8258 spin_lock_irqsave(&h->lock, flags);
8259 heartbeat = readl(&h->cfgtable->HeartBeat);
8260 spin_unlock_irqrestore(&h->lock, flags);
8261 if (h->last_heartbeat == heartbeat) {
8262 controller_lockup_detected(h);
8263 return true;
8264 }
8265
8266 /* We're ok. */
8267 h->last_heartbeat = heartbeat;
8268 h->last_heartbeat_timestamp = now;
8269 return false;
8270}
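/*
 * In short: the controller is considered locked up only when neither an
 * interrupt nor a heartbeat check has happened within
 * heartbeat_sample_interval AND the HeartBeat counter in the config table
 * has not advanced since the previous sample.
 */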
8271
8272/*
8273 * Set ioaccel status for all ioaccel volumes.
8274 *
8275 * Called from monitor controller worker (hpsa_event_monitor_worker)
8276 *
8277 * A volume (or the volumes that comprise an array set) may be undergoing a
8278 * transformation, so we will be turning off ioaccel for all volumes that
8279 * make up the Array.
8280 */
8281static void hpsa_set_ioaccel_status(struct ctlr_info *h)
8282{
8283 int rc;
8284 int i;
8285 u8 ioaccel_status;
8286 unsigned char *buf;
8287 struct hpsa_scsi_dev_t *device;
8288
8289 if (!h)
8290 return;
8291
8292 buf = kmalloc(64, GFP_KERNEL);
8293 if (!buf)
8294 return;
8295
8296 /*
8297 * Run through current device list used during I/O requests.
8298 */
8299 for (i = 0; i < h->ndevices; i++) {
8300 device = h->dev[i];
8301
8302 if (!device)
8303 continue;
8304 if (!hpsa_vpd_page_supported(h, device->scsi3addr,
8305 HPSA_VPD_LV_IOACCEL_STATUS))
8306 continue;
8307
8308 memset(buf, 0, 64);
8309
8310 rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
8311 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
8312 buf, 64);
8313 if (rc != 0)
8314 continue;
8315
8316 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
8317 device->offload_config =
8318 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
8319 if (device->offload_config)
8320 device->offload_to_be_enabled =
8321 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
8322
8323 /*
8324 * Immediately turn off ioaccel for any volume the
8325 * controller tells us to. Some of the reasons could be:
8326 * transformation - change to the LVs of an Array.
8327 * degraded volume - component failure
8328 *
8329 * If ioaccel is to be re-enabled, re-enable later during the
8330 * scan operation so the driver can get a fresh raidmap
8331 * before turning ioaccel back on.
8332 *
8333 */
8334 if (!device->offload_to_be_enabled)
8335 device->offload_enabled = 0;
8336 }
8337
8338 kfree(buf);
8339}
8340
8341static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8342{
8343 char *event_type;
8344
8345 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8346 return;
8347
8348 /* Ask the controller to clear the events we're handling. */
8349 if ((h->transMethod & (CFGTBL_Trans_io_accel1
8350 | CFGTBL_Trans_io_accel2)) &&
8351 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
8352 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
8353
8354 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
8355 event_type = "state change";
8356 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
8357 event_type = "configuration change";
8358 /* Stop sending new RAID offload reqs via the IO accelerator */
8359 scsi_block_requests(h->scsi_host);
8360 hpsa_set_ioaccel_status(h);
8361 hpsa_drain_accel_commands(h);
8362 /* Set 'accelerator path config change' bit */
8363 dev_warn(&h->pdev->dev,
8364 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
8365 h->events, event_type);
8366 writel(h->events, &(h->cfgtable->clear_event_notify));
8367 /* Set the "clear event notify field update" bit 6 */
8368 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8369 /* Wait until ctlr clears 'clear event notify field', bit 6 */
8370 hpsa_wait_for_clear_event_notify_ack(h);
8371 scsi_unblock_requests(h->scsi_host);
8372 } else {
8373 /* Acknowledge controller notification events. */
8374 writel(h->events, &(h->cfgtable->clear_event_notify));
8375 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8376 hpsa_wait_for_clear_event_notify_ack(h);
8377 }
8378 return;
8379}
8380
8381/* Check a register on the controller to see if there are configuration
8382 * changes (added/changed/removed logical drives, etc.) which mean that
8383 * we should rescan the controller for devices.
8384 * Also check flag for driver-initiated rescan.
8385 */
8386static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
8387{
8388 if (h->drv_req_rescan) {
8389 h->drv_req_rescan = 0;
8390 return 1;
8391 }
8392
8393 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8394 return 0;
8395
8396 h->events = readl(&(h->cfgtable->event_notify));
8397 return h->events & RESCAN_REQUIRED_EVENT_BITS;
8398}
8399
8400/*
8401 * Check if any of the offline devices have become ready
8402 */
8403static int hpsa_offline_devices_ready(struct ctlr_info *h)
8404{
8405 unsigned long flags;
8406 struct offline_device_entry *d;
8407 struct list_head *this, *tmp;
8408
8409 spin_lock_irqsave(&h->offline_device_lock, flags);
8410 list_for_each_safe(this, tmp, &h->offline_device_list) {
8411 d = list_entry(this, struct offline_device_entry,
8412 offline_list);
8413 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8414 if (!hpsa_volume_offline(h, d->scsi3addr)) {
8415 spin_lock_irqsave(&h->offline_device_lock, flags);
8416 list_del(&d->offline_list);
8417 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8418 return 1;
8419 }
8420 spin_lock_irqsave(&h->offline_device_lock, flags);
8421 }
8422 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8423 return 0;
8424}
8425
8426static int hpsa_luns_changed(struct ctlr_info *h)
8427{
8428 int rc = 1; /* assume there are changes */
8429 struct ReportLUNdata *logdev = NULL;
8430
8431 /* if we can't find out if lun data has changed,
8432 * assume that it has.
8433 */
8434
8435 if (!h->lastlogicals)
8436 return rc;
8437
8438 logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
8439 if (!logdev)
8440 return rc;
8441
8442 if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
8443 dev_warn(&h->pdev->dev,
8444 "report luns failed, can't track lun changes.\n");
8445 goto out;
8446 }
8447 if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
8448 dev_info(&h->pdev->dev,
8449 "Lun changes detected.\n");
8450 memcpy(h->lastlogicals, logdev, sizeof(*logdev));
8451 goto out;
8452 } else
8453 rc = 0; /* no changes detected. */
8454out:
8455 kfree(logdev);
8456 return rc;
8457}
8458
8459static void hpsa_perform_rescan(struct ctlr_info *h)
8460{
8461 struct Scsi_Host *sh = NULL;
8462 unsigned long flags;
8463
8464 /*
8465 * Do the scan after the reset
8466 */
8467 spin_lock_irqsave(&h->reset_lock, flags);
8468 if (h->reset_in_progress) {
8469 h->drv_req_rescan = 1;
8470 spin_unlock_irqrestore(&h->reset_lock, flags);
8471 return;
8472 }
8473 spin_unlock_irqrestore(&h->reset_lock, flags);
8474
8475 sh = scsi_host_get(h->scsi_host);
8476 if (sh != NULL) {
8477 hpsa_scan_start(sh);
8478 scsi_host_put(sh);
8479 h->drv_req_rescan = 0;
8480 }
8481}
8482
8483/*
8484 * watch for controller events
8485 */
8486static void hpsa_event_monitor_worker(struct work_struct *work)
8487{
8488 struct ctlr_info *h = container_of(to_delayed_work(work),
8489 struct ctlr_info, event_monitor_work);
8490 unsigned long flags;
8491
8492 spin_lock_irqsave(&h->lock, flags);
8493 if (h->remove_in_progress) {
8494 spin_unlock_irqrestore(&h->lock, flags);
8495 return;
8496 }
8497 spin_unlock_irqrestore(&h->lock, flags);
8498
8499 if (hpsa_ctlr_needs_rescan(h)) {
8500 hpsa_ack_ctlr_events(h);
8501 hpsa_perform_rescan(h);
8502 }
8503
8504 spin_lock_irqsave(&h->lock, flags);
8505 if (!h->remove_in_progress)
8506 queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work,
8507 HPSA_EVENT_MONITOR_INTERVAL);
8508 spin_unlock_irqrestore(&h->lock, flags);
8509}
8510
8511static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8512{
8513 unsigned long flags;
8514 struct ctlr_info *h = container_of(to_delayed_work(work),
8515 struct ctlr_info, rescan_ctlr_work);
8516
8517 spin_lock_irqsave(&h->lock, flags);
8518 if (h->remove_in_progress) {
8519 spin_unlock_irqrestore(&h->lock, flags);
8520 return;
8521 }
8522 spin_unlock_irqrestore(&h->lock, flags);
8523
8524 if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
8525 hpsa_perform_rescan(h);
8526 } else if (h->discovery_polling) {
8527 if (hpsa_luns_changed(h)) {
8528 dev_info(&h->pdev->dev,
8529 "driver discovery polling rescan.\n");
8530 hpsa_perform_rescan(h);
8531 }
8532 }
8533 spin_lock_irqsave(&h->lock, flags);
8534 if (!h->remove_in_progress)
8535 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8536 h->heartbeat_sample_interval);
8537 spin_unlock_irqrestore(&h->lock, flags);
8538}
8539
8540static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8541{
8542 unsigned long flags;
8543 struct ctlr_info *h = container_of(to_delayed_work(work),
8544 struct ctlr_info, monitor_ctlr_work);
8545
8546 detect_controller_lockup(h);
8547 if (lockup_detected(h))
8548 return;
8549
8550 spin_lock_irqsave(&h->lock, flags);
8551 if (!h->remove_in_progress)
8552 queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work,
8553 h->heartbeat_sample_interval);
8554 spin_unlock_irqrestore(&h->lock, flags);
8555}
8556
8557static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8558 char *name)
8559{
8560 struct workqueue_struct *wq = NULL;
8561
8562 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8563 if (!wq)
8564 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8565
8566 return wq;
8567}
8568
8569static void hpda_free_ctlr_info(struct ctlr_info *h)
8570{
8571 kfree(h->reply_map);
8572 kfree(h);
8573}
8574
8575static struct ctlr_info *hpda_alloc_ctlr_info(void)
8576{
8577 struct ctlr_info *h;
8578
8579 h = kzalloc(sizeof(*h), GFP_KERNEL);
8580 if (!h)
8581 return NULL;
8582
8583 h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
8584 if (!h->reply_map) {
8585 kfree(h);
8586 return NULL;
8587 }
8588 return h;
8589}
8590
8591static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8592{
8593 int dac, rc;
8594 struct ctlr_info *h;
8595 int try_soft_reset = 0;
8596 unsigned long flags;
8597 u32 board_id;
8598
8599 if (number_of_controllers == 0)
8600 printk(KERN_INFO DRIVER_NAME "\n");
8601
8602 rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
8603 if (rc < 0) {
8604 dev_warn(&pdev->dev, "Board ID not found\n");
8605 return rc;
8606 }
8607
8608 rc = hpsa_init_reset_devices(pdev, board_id);
8609 if (rc) {
8610 if (rc != -ENOTSUPP)
8611 return rc;
8612 /* If the reset fails in a particular way (it has no way to do
8613 * a proper hard reset, so returns -ENOTSUPP) we can try to do
8614 * a soft reset once we get the controller configured up to the
8615 * point that it can accept a command.
8616 */
8617 try_soft_reset = 1;
8618 rc = 0;
8619 }
8620
8621reinit_after_soft_reset:
8622
8623 /* Command structures must be aligned on a 32-byte boundary because
8624	 * the 5 lower bits of the address are used by the hardware and by
8625 * the driver. See comments in hpsa.h for more info.
8626 */
8627 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8628 h = hpda_alloc_ctlr_info();
8629 if (!h) {
8630 dev_err(&pdev->dev, "Failed to allocate controller head\n");
8631 return -ENOMEM;
8632 }
8633
8634 h->pdev = pdev;
8635
8636 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
8637 INIT_LIST_HEAD(&h->offline_device_list);
8638 spin_lock_init(&h->lock);
8639 spin_lock_init(&h->offline_device_lock);
8640 spin_lock_init(&h->scan_lock);
8641 spin_lock_init(&h->reset_lock);
8642 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
8643
8644 /* Allocate and clear per-cpu variable lockup_detected */
8645 h->lockup_detected = alloc_percpu(u32);
8646 if (!h->lockup_detected) {
8647 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
8648 rc = -ENOMEM;
8649 goto clean1; /* aer/h */
8650 }
8651 set_lockup_detected_for_all_cpus(h, 0);
8652
8653 rc = hpsa_pci_init(h);
8654 if (rc)
8655 goto clean2; /* lu, aer/h */
8656
8657 /* relies on h-> settings made by hpsa_pci_init, including
8658	 * the interrupt mode in h->intr_mode */
8659 rc = hpsa_scsi_host_alloc(h);
8660 if (rc)
8661 goto clean2_5; /* pci, lu, aer/h */
8662
8663 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
8664 h->ctlr = number_of_controllers;
8665 number_of_controllers++;
8666
8667 /* configure PCI DMA stuff */
8668 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
8669 if (rc == 0) {
8670 dac = 1;
8671 } else {
8672 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
8673 if (rc == 0) {
8674 dac = 0;
8675 } else {
8676 dev_err(&pdev->dev, "no suitable DMA available\n");
8677 goto clean3; /* shost, pci, lu, aer/h */
8678 }
8679 }
8680
8681 /* make sure the board interrupts are off */
8682 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8683
8684 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8685 if (rc)
8686 goto clean3; /* shost, pci, lu, aer/h */
8687 rc = hpsa_alloc_cmd_pool(h);
8688 if (rc)
8689 goto clean4; /* irq, shost, pci, lu, aer/h */
8690 rc = hpsa_alloc_sg_chain_blocks(h);
8691 if (rc)
8692 goto clean5; /* cmd, irq, shost, pci, lu, aer/h */
8693 init_waitqueue_head(&h->scan_wait_queue);
8694 init_waitqueue_head(&h->event_sync_wait_queue);
8695 mutex_init(&h->reset_mutex);
8696 h->scan_finished = 1; /* no scan currently in progress */
8697 h->scan_waiting = 0;
8698
8699 pci_set_drvdata(pdev, h);
8700 h->ndevices = 0;
8701
8702 spin_lock_init(&h->devlock);
8703 rc = hpsa_put_ctlr_into_performant_mode(h);
8704 if (rc)
8705 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
8706
8707	/* create the rescan, resubmit, and monitor workqueues */
8708 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8709 if (!h->rescan_ctlr_wq) {
8710 rc = -ENOMEM;
8711 goto clean7;
8712 }
8713
8714 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8715 if (!h->resubmit_wq) {
8716 rc = -ENOMEM;
8717		goto clean7;
8718 }
8719
8720 h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor");
8721 if (!h->monitor_ctlr_wq) {
8722 rc = -ENOMEM;
8723 goto clean7;
8724 }
8725
8726 /*
8727 * At this point, the controller is ready to take commands.
8728 * Now, if reset_devices and the hard reset didn't work, try
8729 * the soft reset and see if that works.
8730 */
8731 if (try_soft_reset) {
8732
8733 /* This is kind of gross. We may or may not get a completion
8734 * from the soft reset command, and if we do, then the value
8735 * from the fifo may or may not be valid. So, we wait 10 secs
8736 * after the reset throwing away any completions we get during
8737 * that time. Unregister the interrupt handler and register
8738 * fake ones to scoop up any residual completions.
8739 */
8740 spin_lock_irqsave(&h->lock, flags);
8741 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8742 spin_unlock_irqrestore(&h->lock, flags);
8743 hpsa_free_irqs(h);
8744 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
8745 hpsa_intx_discard_completions);
8746 if (rc) {
8747 dev_warn(&h->pdev->dev,
8748 "Failed to request_irq after soft reset.\n");
8749 /*
8750 * cannot goto clean7 or free_irqs will be called
8751 * again. Instead, do its work
8752 */
8753 hpsa_free_performant_mode(h); /* clean7 */
8754 hpsa_free_sg_chain_blocks(h); /* clean6 */
8755 hpsa_free_cmd_pool(h); /* clean5 */
8756 /*
8757 * skip hpsa_free_irqs(h) clean4 since that
8758 * was just called before request_irqs failed
8759 */
8760 goto clean3;
8761 }
8762
8763 rc = hpsa_kdump_soft_reset(h);
8764 if (rc)
8765 /* Neither hard nor soft reset worked, we're hosed. */
8766 goto clean7;
8767
8768 dev_info(&h->pdev->dev, "Board READY.\n");
8769 dev_info(&h->pdev->dev,
8770 "Waiting for stale completions to drain.\n");
8771 h->access.set_intr_mask(h, HPSA_INTR_ON);
8772 msleep(10000);
8773 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8774
8775 rc = controller_reset_failed(h->cfgtable);
8776 if (rc)
8777 dev_info(&h->pdev->dev,
8778 "Soft reset appears to have failed.\n");
8779
8780 /* since the controller's reset, we have to go back and re-init
8781 * everything. Easiest to just forget what we've done and do it
8782 * all over again.
8783 */
8784 hpsa_undo_allocations_after_kdump_soft_reset(h);
8785 try_soft_reset = 0;
8786 if (rc)
8787 /* don't goto clean, we already unallocated */
8788 return -ENODEV;
8789
8790 goto reinit_after_soft_reset;
8791 }
8792
8793 /* Enable Accelerated IO path at driver layer */
8794 h->acciopath_status = 1;
8795 /* Disable discovery polling.*/
8796	/* Disable discovery polling. */
8797
8798
8799 /* Turn the interrupts on so we can service requests */
8800 h->access.set_intr_mask(h, HPSA_INTR_ON);
8801
8802 hpsa_hba_inquiry(h);
8803
8804 h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
8805 if (!h->lastlogicals)
8806 dev_info(&h->pdev->dev,
8807 "Can't track change to report lun data\n");
8808
8809 /* hook into SCSI subsystem */
8810 rc = hpsa_scsi_add_host(h);
8811 if (rc)
8812 goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8813
8814 /* Monitor the controller for firmware lockups */
8815 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8816 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8817 schedule_delayed_work(&h->monitor_ctlr_work,
8818 h->heartbeat_sample_interval);
8819 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8820 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8821 h->heartbeat_sample_interval);
8822 INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
8823 schedule_delayed_work(&h->event_monitor_work,
8824 HPSA_EVENT_MONITOR_INTERVAL);
8825 return 0;
8826
8827clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8828 hpsa_free_performant_mode(h);
8829 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8830clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
8831 hpsa_free_sg_chain_blocks(h);
8832clean5: /* cmd, irq, shost, pci, lu, aer/h */
8833 hpsa_free_cmd_pool(h);
8834clean4: /* irq, shost, pci, lu, aer/h */
8835 hpsa_free_irqs(h);
8836clean3: /* shost, pci, lu, aer/h */
8837 scsi_host_put(h->scsi_host);
8838 h->scsi_host = NULL;
8839clean2_5: /* pci, lu, aer/h */
8840 hpsa_free_pci_init(h);
8841clean2: /* lu, aer/h */
8842 if (h->lockup_detected) {
8843 free_percpu(h->lockup_detected);
8844 h->lockup_detected = NULL;
8845 }
8846clean1: /* wq/aer/h */
8847 if (h->resubmit_wq) {
8848 destroy_workqueue(h->resubmit_wq);
8849 h->resubmit_wq = NULL;
8850 }
8851 if (h->rescan_ctlr_wq) {
8852 destroy_workqueue(h->rescan_ctlr_wq);
8853 h->rescan_ctlr_wq = NULL;
8854 }
8855 if (h->monitor_ctlr_wq) {
8856 destroy_workqueue(h->monitor_ctlr_wq);
8857 h->monitor_ctlr_wq = NULL;
8858 }
8859 kfree(h);
8860 return rc;
8861}
8862
8863static void hpsa_flush_cache(struct ctlr_info *h)
8864{
8865 char *flush_buf;
8866 struct CommandList *c;
8867 int rc;
8868
8869 if (unlikely(lockup_detected(h)))
8870 return;
8871 flush_buf = kzalloc(4, GFP_KERNEL);
8872 if (!flush_buf)
8873 return;
8874
8875 c = cmd_alloc(h);
8876
8877	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8878		RAID_CTLR_LUNID, TYPE_CMD))
8879		goto out;
8880
8881	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
8882					DEFAULT_TIMEOUT);
8883	if (rc == 0 && c->err_info->CommandStatus == 0)
8884		goto done;
8885out:
8886	dev_warn(&h->pdev->dev,
8887		"error flushing cache on controller\n");
8888done:
8889	cmd_free(h, c);
8890	kfree(flush_buf);
8891}
8892
8893/* Make controller gather fresh report lun data each time we
8894 * send down a report luns request
8895 */
8896static void hpsa_disable_rld_caching(struct ctlr_info *h)
8897{
8898 u32 *options;
8899 struct CommandList *c;
8900 int rc;
8901
8902 /* Don't bother trying to set diag options if locked up */
8903	if (unlikely(lockup_detected(h)))
8904 return;
8905
8906 options = kzalloc(sizeof(*options), GFP_KERNEL);
8907 if (!options)
8908 return;
8909
8910 c = cmd_alloc(h);
8911
8912 /* first, get the current diag options settings */
8913 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8914 RAID_CTLR_LUNID, TYPE_CMD))
8915 goto errout;
8916
8917 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
8918 NO_TIMEOUT);
8919 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8920 goto errout;
8921
8922 /* Now, set the bit for disabling the RLD caching */
8923 *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;
8924
8925 if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
8926 RAID_CTLR_LUNID, TYPE_CMD))
8927 goto errout;
8928
8929 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
8930 NO_TIMEOUT);
8931 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8932 goto errout;
8933
8934 /* Now verify that it got set: */
8935 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8936 RAID_CTLR_LUNID, TYPE_CMD))
8937 goto errout;
8938
8939 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
8940 NO_TIMEOUT);
8941 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8942 goto errout;
8943
8944 if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
8945 goto out;
8946
8947errout:
8948 dev_err(&h->pdev->dev,
8949 "Error: failed to disable report lun data caching.\n");
8950out:
8951 cmd_free(h, c);
8952 kfree(options);
8953}
8954
8955static void __hpsa_shutdown(struct pci_dev *pdev)
8956{
8957 struct ctlr_info *h;
8958
8959 h = pci_get_drvdata(pdev);
8960	/* Send the flush cache command so that all data in the
8961	 * battery-backed cache is written out to disk, then turn
8962	 * the board interrupts off and free the irq resources.
8963 */
8964 hpsa_flush_cache(h);
8965 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8966 hpsa_free_irqs(h); /* init_one 4 */
8967 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
8968}
8969
8970static void hpsa_shutdown(struct pci_dev *pdev)
8971{
8972 __hpsa_shutdown(pdev);
8973 pci_disable_device(pdev);
8974}
8975
8976static void hpsa_free_device_info(struct ctlr_info *h)
8977{
8978 int i;
8979
8980 for (i = 0; i < h->ndevices; i++) {
8981 kfree(h->dev[i]);
8982 h->dev[i] = NULL;
8983 }
8984}
8985
8986static void hpsa_remove_one(struct pci_dev *pdev)
8987{
8988 struct ctlr_info *h;
8989 unsigned long flags;
8990
8991 if (pci_get_drvdata(pdev) == NULL) {
8992 dev_err(&pdev->dev, "unable to remove device\n");
8993 return;
8994 }
8995 h = pci_get_drvdata(pdev);
8996
8997 /* Get rid of any controller monitoring work items */
8998 spin_lock_irqsave(&h->lock, flags);
8999 h->remove_in_progress = 1;
9000 spin_unlock_irqrestore(&h->lock, flags);
9001 cancel_delayed_work_sync(&h->monitor_ctlr_work);
9002 cancel_delayed_work_sync(&h->rescan_ctlr_work);
9003 cancel_delayed_work_sync(&h->event_monitor_work);
9004 destroy_workqueue(h->rescan_ctlr_wq);
9005 destroy_workqueue(h->resubmit_wq);
9006 destroy_workqueue(h->monitor_ctlr_wq);
9007
9008 hpsa_delete_sas_host(h);
9009
9010 /*
9011 * Call before disabling interrupts.
9012 * scsi_remove_host can trigger I/O operations especially
9013 * when multipath is enabled. There can be SYNCHRONIZE CACHE
9014 * operations which cannot complete and will hang the system.
9015 */
9016 if (h->scsi_host)
9017 scsi_remove_host(h->scsi_host); /* init_one 8 */
9018 /* includes hpsa_free_irqs - init_one 4 */
9019 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
9020 __hpsa_shutdown(pdev);
9021
9022 hpsa_free_device_info(h); /* scan */
9023
9024 kfree(h->hba_inquiry_data); /* init_one 10 */
9025 h->hba_inquiry_data = NULL; /* init_one 10 */
9026 hpsa_free_ioaccel2_sg_chain_blocks(h);
9027 hpsa_free_performant_mode(h); /* init_one 7 */
9028 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
9029 hpsa_free_cmd_pool(h); /* init_one 5 */
9030 kfree(h->lastlogicals);
9031
9032 /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
9033
9034 scsi_host_put(h->scsi_host); /* init_one 3 */
9035 h->scsi_host = NULL; /* init_one 3 */
9036
9037 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
9038 hpsa_free_pci_init(h); /* init_one 2.5 */
9039
9040 free_percpu(h->lockup_detected); /* init_one 2 */
9041 h->lockup_detected = NULL; /* init_one 2 */
9042 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
9043
9044 hpda_free_ctlr_info(h); /* init_one 1 */
9045}
9046
9047static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
9048 __attribute__((unused)) pm_message_t state)
9049{
9050 return -ENOSYS;
9051}
9052
9053static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
9054{
9055 return -ENOSYS;
9056}
9057
9058static struct pci_driver hpsa_pci_driver = {
9059 .name = HPSA,
9060 .probe = hpsa_init_one,
9061 .remove = hpsa_remove_one,
9062 .id_table = hpsa_pci_device_id, /* id_table */
9063 .shutdown = hpsa_shutdown,
9064 .suspend = hpsa_suspend,
9065 .resume = hpsa_resume,
9066};
9067
9068/* Fill in bucket_map[], given nsgs (the max number of
9069 * scatter gather elements supported) and bucket[],
9070 * which is an array of 8 integers. The bucket[] array
9071 * contains 8 different DMA transfer sizes (in 16
9072 * byte increments) which the controller uses to fetch
9073 * commands. This function fills in bucket_map[], which
9074 * maps a given number of scatter gather elements to one of
9075 * the 8 DMA transfer sizes. The point of it is to allow the
9076 * controller to only do as much DMA as needed to fetch the
9077 * command, with the DMA transfer size encoded in the lower
9078 * bits of the command address.
9079 */
9080static void calc_bucket_map(int bucket[], int num_buckets,
9081 int nsgs, int min_blocks, u32 *bucket_map)
9082{
9083 int i, j, b, size;
9084
9085 /* Note, bucket_map must have nsgs+1 entries. */
9086 for (i = 0; i <= nsgs; i++) {
9087 /* Compute size of a command with i SG entries */
9088 size = i + min_blocks;
9089 b = num_buckets; /* Assume the biggest bucket */
9090 /* Find the bucket that is just big enough */
9091 for (j = 0; j < num_buckets; j++) {
9092 if (bucket[j] >= size) {
9093 b = j;
9094 break;
9095 }
9096 }
9097 /* for a command with i SG entries, use bucket b. */
9098 bucket_map[i] = b;
9099 }
9100}
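/*
 * Worked example: with bucket[] = {5, 6, 8, 10, 12, 20, 28, ...} and
 * min_blocks = 4 (as used below), a command with 3 SG entries needs
 * 3 + 4 = 7 sixteen-byte blocks; the first bucket >= 7 is 8 at index 2,
 * so bucket_map[3] = 2 and the controller fetches 8 * 16 = 128 bytes.
 */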
9101
9102/*
9103 * return -ENODEV on err, 0 on success (or no action)
9104 * allocates numerous items that must be freed later
9105 */
9106static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
9107{
9108 int i;
9109 unsigned long register_value;
9110 unsigned long transMethod = CFGTBL_Trans_Performant |
9111 (trans_support & CFGTBL_Trans_use_short_tags) |
9112 CFGTBL_Trans_enable_directed_msix |
9113 (trans_support & (CFGTBL_Trans_io_accel1 |
9114 CFGTBL_Trans_io_accel2));
9115 struct access_method access = SA5_performant_access;
9116
	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to, to tell it the 8 different
	 * command sizes that may occur.  This is a way of reducing
	 * the DMA done to fetch each command.  Encoded into each
	 * command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16-byte blocks.
	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
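	/*
	 * For example, a command that falls in bucket 3 (bft[3] == 10)
	 * carries the value 3 in those low tag bits, and the controller
	 * fetches 10 * 16 = 160 bytes instead of the full command block.
	 */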
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
			16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/* 5 = 1 s/g entry or 4k
	 * 6 = 2 s/g entry or 8k
	 * 8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */
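	/* For ioaccel2 the same 16-byte units apply: the 64-byte command
	 * header (see the BUILD_BUG_ONs on io_accel2_cmd nearby) occupies
	 * HPSA_IOACCEL2_HEADER_SZ (4) blocks, and each 16-byte
	 * ioaccel2_sg_element adds one more, so bft2[] runs from 5 up to
	 * HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES.
	 */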

	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/* enable outbound interrupt coalescing in accelerator mode */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		access = SA5_ioaccel_mode2_access;
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - doorbell timeout\n");
		return -ENODEV;
	}
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return -ENODEV;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return 0;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
				&cfg_base_addr_index, &cfg_offset);
		if (rc)	/* cannot locate the config table */
			return rc;
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
				ARRAY_SIZE(bft2) *
				sizeof(*h->ioaccel2_bft2_regs));
		if (!h->ioaccel2_bft2_regs)	/* guard the writel()s below */
			return -ENOMEM;
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - enabling ioaccel mode\n");
		return -ENODEV;
	}
	return 0;
}

/* Free ioaccel1 mode command blocks and block fetch table */
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	if (h->ioaccel_cmd_pool) {
		dma_free_coherent(&h->pdev->dev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool,
			h->ioaccel_cmd_pool_dhandle);
		h->ioaccel_cmd_pool = NULL;
		h->ioaccel_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel1_blockFetchTable);
	h->ioaccel1_blockFetchTable = NULL;
}

/* Allocate ioaccel1 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
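	/* With IOACCEL1_COMMANDLIST_ALIGNMENT at the 128-byte boundary
	 * noted above, a structure size that is a multiple of it keeps
	 * every element of the coherent pool aligned, leaving the low 7
	 * address bits free for the hardware.
	 */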
	h->ioaccel_cmd_pool =
		dma_alloc_coherent(&h->pdev->dev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel1_cmd_and_bft(h);
	return -ENOMEM;
}

/* Free ioaccel2 mode command blocks and block fetch table */
static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	hpsa_free_ioaccel2_sg_chain_blocks(h);

	if (h->ioaccel2_cmd_pool) {
		dma_free_coherent(&h->pdev->dev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool,
			h->ioaccel2_cmd_pool_dhandle);
		h->ioaccel2_cmd_pool = NULL;
		h->ioaccel2_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel2_blockFetchTable);
	h->ioaccel2_blockFetchTable = NULL;
}

/* Allocate ioaccel2 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	int rc;
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		dma_alloc_coherent(&h->pdev->dev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL)) {
		rc = -ENOMEM;
		goto clean_up;
	}

	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
	if (rc)
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}

/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
static void hpsa_free_performant_mode(struct ctlr_info *h)
{
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
}

/* return -ENODEV on error, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i, rc;

	if (hpsa_simple_mode)
		return 0;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return 0;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
		if (rc)
			return rc;
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
		if (rc)
			return rc;
	}

	h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);
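	/* Each reply-queue entry is one 8-byte tag, so a controller
	 * reporting e.g. 1024 max commands gets an 8 KiB ring per queue.
	 */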

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
						h->reply_queue_size,
						&h->reply_queue[i].busaddr,
						GFP_KERNEL);
		if (!h->reply_queue[i].head) {
			rc = -ENOMEM;
			goto clean1;	/* rq, ioaccel */
		}
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable) {
		rc = -ENOMEM;
		goto clean1;	/* rq, ioaccel */
	}

	rc = hpsa_enter_performant_mode(h, trans_support);
	if (rc)
		goto clean2;	/* bft, rq, ioaccel */
	return 0;

clean2:	/* bft, rq, ioaccel */
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
clean1:	/* rq, ioaccel */
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}

static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			/* A refcount > 1 after our increment means some
			 * other path holds the command, i.e. it is in use.
			 */
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);	/* drop the reference taken above */
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}

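/*
 * SAS transport plumbing.  The controller is modeled as a single
 * hpsa_sas_node; a node owns a list of hpsa_sas_ports, and each port owns
 * its hpsa_sas_phys (plus, for end devices, a remote phy).  The free
 * routines below walk these lists and tear down in the reverse order of
 * construction.
 */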
static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
	struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct sas_phy *phy;

	hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
	if (!hpsa_sas_phy)
		return NULL;

	phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
		hpsa_sas_port->next_phy_index);
	if (!phy) {
		kfree(hpsa_sas_phy);
		return NULL;
	}

	hpsa_sas_port->next_phy_index++;
	hpsa_sas_phy->phy = phy;
	hpsa_sas_phy->parent_port = hpsa_sas_port;

	return hpsa_sas_phy;
}

static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	struct sas_phy *phy = hpsa_sas_phy->phy;

	sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
	if (hpsa_sas_phy->added_to_port)
		list_del(&hpsa_sas_phy->phy_list_entry);
	sas_phy_delete(phy);
	kfree(hpsa_sas_phy);
}

static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_phy *phy;
	struct sas_identify *identify;

	hpsa_sas_port = hpsa_sas_phy->parent_port;
	phy = hpsa_sas_phy->phy;

	identify = &phy->identify;
	memset(identify, 0, sizeof(*identify));
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->device_type = SAS_END_DEVICE;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;
	phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;

	rc = sas_phy_add(hpsa_sas_phy->phy);
	if (rc)
		return rc;

	sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
	list_add_tail(&hpsa_sas_phy->phy_list_entry,
			&hpsa_sas_port->phy_list_head);
	hpsa_sas_phy->added_to_port = true;

	return 0;
}

static int
	hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
				struct sas_rphy *rphy)
{
	struct sas_identify *identify;

	identify = &rphy->identify;
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;

	return sas_rphy_add(rphy);
}

static struct hpsa_sas_port
	*hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
				u64 sas_address)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_port *port;

	hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
	if (!hpsa_sas_port)
		return NULL;

	INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
	hpsa_sas_port->parent_node = hpsa_sas_node;

	port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
	if (!port)
		goto free_hpsa_port;

	rc = sas_port_add(port);
	if (rc)
		goto free_sas_port;

	hpsa_sas_port->port = port;
	hpsa_sas_port->sas_address = sas_address;
	list_add_tail(&hpsa_sas_port->port_list_entry,
			&hpsa_sas_node->port_list_head);

	return hpsa_sas_port;

free_sas_port:
	sas_port_free(port);
free_hpsa_port:
	kfree(hpsa_sas_port);

	return NULL;
}

static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct hpsa_sas_phy *next;

	list_for_each_entry_safe(hpsa_sas_phy, next,
			&hpsa_sas_port->phy_list_head, phy_list_entry)
		hpsa_free_sas_phy(hpsa_sas_phy);

	sas_port_delete(hpsa_sas_port->port);
	list_del(&hpsa_sas_port->port_list_entry);
	kfree(hpsa_sas_port);
}

static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
{
	struct hpsa_sas_node *hpsa_sas_node;

	hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
	if (hpsa_sas_node) {
		hpsa_sas_node->parent_dev = parent_dev;
		INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
	}

	return hpsa_sas_node;
}

static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
{
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_port *next;

	if (!hpsa_sas_node)
		return;

	list_for_each_entry_safe(hpsa_sas_port, next,
			&hpsa_sas_node->port_list_head, port_list_entry)
		hpsa_free_sas_port(hpsa_sas_port);

	kfree(hpsa_sas_node);
}

static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
					struct sas_rphy *rphy)
{
	int i;
	struct hpsa_scsi_dev_t *device;

	for (i = 0; i < h->ndevices; i++) {
		device = h->dev[i];
		if (!device->sas_port)
			continue;
		if (device->sas_port->rphy == rphy)
			return device;
	}

	return NULL;
}

static int hpsa_add_sas_host(struct ctlr_info *h)
{
	int rc;
	struct device *parent_dev;
	struct hpsa_sas_node *hpsa_sas_node;
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_phy *hpsa_sas_phy;

	parent_dev = &h->scsi_host->shost_dev;

	hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
	if (!hpsa_sas_node)
		return -ENOMEM;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
	if (!hpsa_sas_port) {
		rc = -ENODEV;
		goto free_sas_node;
	}

	hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
	if (!hpsa_sas_phy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
	if (rc)
		goto free_sas_phy;

	h->sas_host = hpsa_sas_node;

	return 0;

free_sas_phy:
	hpsa_free_sas_phy(hpsa_sas_phy);
free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
free_sas_node:
	hpsa_free_sas_node(hpsa_sas_node);

	return rc;
}

static void hpsa_delete_sas_host(struct ctlr_info *h)
{
	hpsa_free_sas_node(h->sas_host);
}

static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
				struct hpsa_scsi_dev_t *device)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_rphy *rphy;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
	if (!hpsa_sas_port)
		return -ENOMEM;

	rphy = sas_end_device_alloc(hpsa_sas_port->port);
	if (!rphy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	hpsa_sas_port->rphy = rphy;
	device->sas_port = hpsa_sas_port;

	rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
	if (rc)
		goto free_sas_port;

	return 0;

free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
	device->sas_port = NULL;

	return rc;
}

static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
{
	if (device->sas_port) {
		hpsa_free_sas_port(device->sas_port);
		device->sas_port = NULL;
	}
}

static int
hpsa_sas_get_linkerrors(struct sas_phy *phy)
{
	return 0;
}

static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
	struct Scsi_Host *shost = phy_to_shost(rphy);
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *sd;

	if (!shost)
		return -ENXIO;

	h = shost_to_hba(shost);

	if (!h)
		return -ENXIO;

	sd = hpsa_find_device_by_sas_rphy(h, rphy);
	if (!sd)
		return -ENXIO;

	*identifier = sd->eli;

	return 0;
}

static int
hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
{
	return -ENXIO;
}

static int
hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	return 0;
}

static int
hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
{
	return 0;
}

static int
hpsa_sas_phy_setup(struct sas_phy *phy)
{
	return 0;
}

static void
hpsa_sas_phy_release(struct sas_phy *phy)
{
}

static int
hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
{
	return -EINVAL;
}
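
/*
 * Of the transport callbacks above, only get_enclosure_identifier does
 * real work; the rest are minimal stubs, present so the SAS transport
 * class has a complete function template to attach to.
 */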

static struct sas_function_template hpsa_sas_transport_functions = {
	.get_linkerrors = hpsa_sas_get_linkerrors,
	.get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
	.get_bay_identifier = hpsa_sas_get_bay_identifier,
	.phy_reset = hpsa_sas_phy_reset,
	.phy_enable = hpsa_sas_phy_enable,
	.phy_setup = hpsa_sas_phy_setup,
	.phy_release = hpsa_sas_phy_release,
	.set_phy_speed = hpsa_sas_phy_speed,
};

/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
static int __init hpsa_init(void)
{
	int rc;

	hpsa_sas_transport_template =
		sas_attach_transport(&hpsa_sas_transport_functions);
	if (!hpsa_sas_transport_template)
		return -ENODEV;

	rc = pci_register_driver(&hpsa_pci_driver);

	if (rc)
		sas_release_transport(hpsa_sas_transport_template);

	return rc;
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
	sas_release_transport(hpsa_sas_transport_template);
}

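/*
 * Never called at runtime: verify_offsets() exists only so its
 * BUILD_BUG_ON() checks are evaluated at compile time, pinning the
 * on-the-wire layout of the raid map and ioaccel command structures.
 */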
static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48) */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3) */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4) */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);