Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SCSI] hpsa: move device attributes to avoid forward declarations

Signed-off-by: Stephen M. Cameron <scameron@beardog.cce.hp.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>

Authored by Stephen M. Cameron and committed by James Bottomley.
3f5eac3a 5b94e232

Diffstat: +120 -133 (120 lines added, 133 lines removed)
drivers/scsi/hpsa.c
··· 155 155 static int hpsa_slave_alloc(struct scsi_device *sdev); 156 156 static void hpsa_slave_destroy(struct scsi_device *sdev); 157 157 158 - static ssize_t raid_level_show(struct device *dev, 159 - struct device_attribute *attr, char *buf); 160 - static ssize_t lunid_show(struct device *dev, 161 - struct device_attribute *attr, char *buf); 162 - static ssize_t unique_id_show(struct device *dev, 163 - struct device_attribute *attr, char *buf); 164 - static ssize_t host_show_firmware_revision(struct device *dev, 165 - struct device_attribute *attr, char *buf); 166 - static ssize_t host_show_commands_outstanding(struct device *dev, 167 - struct device_attribute *attr, char *buf); 168 - static ssize_t host_show_transport_mode(struct device *dev, 169 - struct device_attribute *attr, char *buf); 170 158 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno); 171 - static ssize_t host_store_rescan(struct device *dev, 172 - struct device_attribute *attr, const char *buf, size_t count); 173 159 static int check_for_unit_attention(struct ctlr_info *h, 174 160 struct CommandList *c); 175 161 static void check_ioctl_unit_attention(struct ctlr_info *h, ··· 175 189 void __iomem *vaddr, int wait_for_ready); 176 190 #define BOARD_NOT_READY 0 177 191 #define BOARD_READY 1 178 - 179 - static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); 180 - static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); 181 - static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); 182 - static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); 183 - static DEVICE_ATTR(firmware_revision, S_IRUGO, 184 - host_show_firmware_revision, NULL); 185 - static DEVICE_ATTR(commands_outstanding, S_IRUGO, 186 - host_show_commands_outstanding, NULL); 187 - static DEVICE_ATTR(transport_mode, S_IRUGO, 188 - host_show_transport_mode, NULL); 189 - 190 - static struct device_attribute *hpsa_sdev_attrs[] = { 191 - &dev_attr_raid_level, 192 - &dev_attr_lunid, 193 - &dev_attr_unique_id, 
194 - NULL, 195 - }; 196 - 197 - static struct device_attribute *hpsa_shost_attrs[] = { 198 - &dev_attr_rescan, 199 - &dev_attr_firmware_revision, 200 - &dev_attr_commands_outstanding, 201 - &dev_attr_transport_mode, 202 - NULL, 203 - }; 204 - 205 - static struct scsi_host_template hpsa_driver_template = { 206 - .module = THIS_MODULE, 207 - .name = "hpsa", 208 - .proc_name = "hpsa", 209 - .queuecommand = hpsa_scsi_queue_command, 210 - .scan_start = hpsa_scan_start, 211 - .scan_finished = hpsa_scan_finished, 212 - .change_queue_depth = hpsa_change_queue_depth, 213 - .this_id = -1, 214 - .use_clustering = ENABLE_CLUSTERING, 215 - .eh_device_reset_handler = hpsa_eh_device_reset_handler, 216 - .ioctl = hpsa_ioctl, 217 - .slave_alloc = hpsa_slave_alloc, 218 - .slave_destroy = hpsa_slave_destroy, 219 - #ifdef CONFIG_COMPAT 220 - .compat_ioctl = hpsa_compat_ioctl, 221 - #endif 222 - .sdev_attrs = hpsa_sdev_attrs, 223 - .shost_attrs = hpsa_shost_attrs, 224 - }; 225 192 226 193 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) 227 194 { ··· 273 334 "performant" : "simple"); 274 335 } 275 336 276 - /* Enqueuing and dequeuing functions for cmdlists. 
*/ 277 - static inline void addQ(struct list_head *list, struct CommandList *c) 278 - { 279 - list_add_tail(&c->list, list); 280 - } 281 - 282 - static inline u32 next_command(struct ctlr_info *h) 283 - { 284 - u32 a; 285 - 286 - if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) 287 - return h->access.command_completed(h); 288 - 289 - if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { 290 - a = *(h->reply_pool_head); /* Next cmd in ring buffer */ 291 - (h->reply_pool_head)++; 292 - h->commands_outstanding--; 293 - } else { 294 - a = FIFO_EMPTY; 295 - } 296 - /* Check for wraparound */ 297 - if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { 298 - h->reply_pool_head = h->reply_pool; 299 - h->reply_pool_wraparound ^= 1; 300 - } 301 - return a; 302 - } 303 - 304 - /* set_performant_mode: Modify the tag for cciss performant 305 - * set bit 0 for pull model, bits 3-1 for block fetch 306 - * register number 307 - */ 308 - static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) 309 - { 310 - if (likely(h->transMethod & CFGTBL_Trans_Performant)) 311 - c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); 312 - } 313 - 314 - static void enqueue_cmd_and_start_io(struct ctlr_info *h, 315 - struct CommandList *c) 316 - { 317 - unsigned long flags; 318 - 319 - set_performant_mode(h, c); 320 - spin_lock_irqsave(&h->lock, flags); 321 - addQ(&h->reqQ, c); 322 - h->Qdepth++; 323 - start_io(h); 324 - spin_unlock_irqrestore(&h->lock, flags); 325 - } 326 - 327 - static inline void removeQ(struct CommandList *c) 328 - { 329 - if (WARN_ON(list_empty(&c->list))) 330 - return; 331 - list_del_init(&c->list); 332 - } 333 - 334 - static inline int is_hba_lunid(unsigned char scsi3addr[]) 335 - { 336 - return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0; 337 - } 338 - 339 337 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) 340 338 { 341 339 return (scsi3addr[3] & 0xC0) == 0x40; 342 - } 343 - 344 - static inline 
int is_scsi_rev_5(struct ctlr_info *h) 345 - { 346 - if (!h->hba_inquiry_data) 347 - return 0; 348 - if ((h->hba_inquiry_data[2] & 0x07) == 5) 349 - return 1; 350 - return 0; 351 340 } 352 341 353 342 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", ··· 367 500 sn[4], sn[5], sn[6], sn[7], 368 501 sn[8], sn[9], sn[10], sn[11], 369 502 sn[12], sn[13], sn[14], sn[15]); 503 + } 504 + 505 + static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); 506 + static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); 507 + static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); 508 + static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); 509 + static DEVICE_ATTR(firmware_revision, S_IRUGO, 510 + host_show_firmware_revision, NULL); 511 + static DEVICE_ATTR(commands_outstanding, S_IRUGO, 512 + host_show_commands_outstanding, NULL); 513 + static DEVICE_ATTR(transport_mode, S_IRUGO, 514 + host_show_transport_mode, NULL); 515 + 516 + static struct device_attribute *hpsa_sdev_attrs[] = { 517 + &dev_attr_raid_level, 518 + &dev_attr_lunid, 519 + &dev_attr_unique_id, 520 + NULL, 521 + }; 522 + 523 + static struct device_attribute *hpsa_shost_attrs[] = { 524 + &dev_attr_rescan, 525 + &dev_attr_firmware_revision, 526 + &dev_attr_commands_outstanding, 527 + &dev_attr_transport_mode, 528 + NULL, 529 + }; 530 + 531 + static struct scsi_host_template hpsa_driver_template = { 532 + .module = THIS_MODULE, 533 + .name = "hpsa", 534 + .proc_name = "hpsa", 535 + .queuecommand = hpsa_scsi_queue_command, 536 + .scan_start = hpsa_scan_start, 537 + .scan_finished = hpsa_scan_finished, 538 + .change_queue_depth = hpsa_change_queue_depth, 539 + .this_id = -1, 540 + .use_clustering = ENABLE_CLUSTERING, 541 + .eh_device_reset_handler = hpsa_eh_device_reset_handler, 542 + .ioctl = hpsa_ioctl, 543 + .slave_alloc = hpsa_slave_alloc, 544 + .slave_destroy = hpsa_slave_destroy, 545 + #ifdef CONFIG_COMPAT 546 + .compat_ioctl = hpsa_compat_ioctl, 547 + #endif 548 + 
.sdev_attrs = hpsa_sdev_attrs, 549 + .shost_attrs = hpsa_shost_attrs, 550 + }; 551 + 552 + 553 + /* Enqueuing and dequeuing functions for cmdlists. */ 554 + static inline void addQ(struct list_head *list, struct CommandList *c) 555 + { 556 + list_add_tail(&c->list, list); 557 + } 558 + 559 + static inline u32 next_command(struct ctlr_info *h) 560 + { 561 + u32 a; 562 + 563 + if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) 564 + return h->access.command_completed(h); 565 + 566 + if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { 567 + a = *(h->reply_pool_head); /* Next cmd in ring buffer */ 568 + (h->reply_pool_head)++; 569 + h->commands_outstanding--; 570 + } else { 571 + a = FIFO_EMPTY; 572 + } 573 + /* Check for wraparound */ 574 + if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { 575 + h->reply_pool_head = h->reply_pool; 576 + h->reply_pool_wraparound ^= 1; 577 + } 578 + return a; 579 + } 580 + 581 + /* set_performant_mode: Modify the tag for cciss performant 582 + * set bit 0 for pull model, bits 3-1 for block fetch 583 + * register number 584 + */ 585 + static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) 586 + { 587 + if (likely(h->transMethod & CFGTBL_Trans_Performant)) 588 + c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); 589 + } 590 + 591 + static void enqueue_cmd_and_start_io(struct ctlr_info *h, 592 + struct CommandList *c) 593 + { 594 + unsigned long flags; 595 + 596 + set_performant_mode(h, c); 597 + spin_lock_irqsave(&h->lock, flags); 598 + addQ(&h->reqQ, c); 599 + h->Qdepth++; 600 + start_io(h); 601 + spin_unlock_irqrestore(&h->lock, flags); 602 + } 603 + 604 + static inline void removeQ(struct CommandList *c) 605 + { 606 + if (WARN_ON(list_empty(&c->list))) 607 + return; 608 + list_del_init(&c->list); 609 + } 610 + 611 + static inline int is_hba_lunid(unsigned char scsi3addr[]) 612 + { 613 + return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0; 614 + } 615 + 616 + static inline 
int is_scsi_rev_5(struct ctlr_info *h) 617 + { 618 + if (!h->hba_inquiry_data) 619 + return 0; 620 + if ((h->hba_inquiry_data[2] & 0x07) == 5) 621 + return 1; 622 + return 0; 370 623 } 371 624 372 625 static int hpsa_find_target_lun(struct ctlr_info *h,