Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: smartpqi: add ofa support

- When an OFA event occurs, the driver stops traffic to the RAID/HBA path
and waits for all outstanding requests to complete.
- The driver sends an OFA event acknowledgment to the firmware.
- The driver waits until the new firmware is up and running.
- The driver frees up the resources.
- The driver calls SIS/PQI initialization and rescans the device list.
- The driver resumes traffic to the RAID/HBA path.

Reviewed-by: Murthy Bhat <murthy.bhat@microsemi.com>
Signed-off-by: Mahesh Rajashekhara <mahesh.rajashekhara@microsemi.com>
Signed-off-by: Don Brace <don.brace@microsemi.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

authored by

Mahesh Rajashekhara and committed by
Martin K. Petersen
4fd22c13 65111785

+634 -43
+72 -4
drivers/scsi/smartpqi/smartpqi.h
··· 100 100 struct pqi_device_registers pqi_registers; /* 4000h */ 101 101 }; 102 102 103 + #if ((HZ) < 1000) 104 + #define PQI_HZ 1000 105 + #else 106 + #define PQI_HZ (HZ) 107 + #endif 108 + 103 109 #define PQI_DEVICE_REGISTERS_OFFSET 0x4000 104 110 105 111 enum pqi_io_path { ··· 356 350 357 351 #define PQI_MAX_EVENT_DESCRIPTORS 255 358 352 353 + #define PQI_EVENT_OFA_MEMORY_ALLOCATION 0x0 354 + #define PQI_EVENT_OFA_QUIESCE 0x1 355 + #define PQI_EVENT_OFA_CANCELLED 0x2 356 + 359 357 struct pqi_event_response { 360 358 struct pqi_iu_header header; 361 359 u8 event_type; ··· 367 357 u8 request_acknowlege : 1; 368 358 __le16 event_id; 369 359 __le32 additional_event_id; 370 - u8 data[16]; 360 + union { 361 + struct { 362 + __le32 bytes_requested; 363 + u8 reserved[12]; 364 + } ofa_memory_allocation; 365 + 366 + struct { 367 + __le16 reason; /* reason for cancellation */ 368 + u8 reserved[14]; 369 + } ofa_cancelled; 370 + } data; 371 371 }; 372 372 373 373 struct pqi_event_acknowledge_request { ··· 440 420 }; 441 421 442 422 #define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0 423 + #define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE 1 424 + 425 + #define PQI_OFA_VERSION 1 426 + #define PQI_OFA_SIGNATURE "OFA_QRM" 427 + #define PQI_OFA_MAX_SG_DESCRIPTORS 64 428 + 429 + #define PQI_OFA_MEMORY_DESCRIPTOR_LENGTH \ 430 + (offsetof(struct pqi_ofa_memory, sg_descriptor) + \ 431 + (PQI_OFA_MAX_SG_DESCRIPTORS * sizeof(struct pqi_sg_descriptor))) 432 + 433 + struct pqi_ofa_memory { 434 + __le64 signature; /* "OFA_QRM" */ 435 + __le16 version; /* version of this struct(1 = 1st version) */ 436 + u8 reserved[62]; 437 + __le32 bytes_allocated; /* total allocated memory in bytes */ 438 + __le16 num_memory_descriptors; 439 + u8 reserved1[2]; 440 + struct pqi_sg_descriptor sg_descriptor[1]; 441 + }; 443 442 444 443 struct pqi_aio_error_info { 445 444 u8 status; ··· 565 526 #define PQI_EVENT_TYPE_HARDWARE 0x2 566 527 #define PQI_EVENT_TYPE_PHYSICAL_DEVICE 0x4 567 528 #define 
PQI_EVENT_TYPE_LOGICAL_DEVICE 0x5 529 + #define PQI_EVENT_TYPE_OFA 0xfb 568 530 #define PQI_EVENT_TYPE_AIO_STATE_CHANGE 0xfd 569 531 #define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE 0xfe 570 532 ··· 725 685 #define PQI_CONFIG_TABLE_SECTION_FIRMWARE_ERRATA 2 726 686 #define PQI_CONFIG_TABLE_SECTION_DEBUG 3 727 687 #define PQI_CONFIG_TABLE_SECTION_HEARTBEAT 4 688 + #define PQI_CONFIG_TABLE_SECTION_SOFT_RESET 5 728 689 729 690 struct pqi_config_table { 730 691 u8 signature[8]; /* "CFGTABLE" */ ··· 765 724 /* u8 features_enabled[]; */ 766 725 }; 767 726 768 - #define PQI_FIRMWARE_FEATURE_OFA 0 769 - #define PQI_FIRMWARE_FEATURE_SMP 1 727 + #define PQI_FIRMWARE_FEATURE_OFA 0 728 + #define PQI_FIRMWARE_FEATURE_SMP 1 729 + #define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11 770 730 771 731 struct pqi_config_table_debug { 772 732 struct pqi_config_table_section_header header; ··· 777 735 struct pqi_config_table_heartbeat { 778 736 struct pqi_config_table_section_header header; 779 737 __le32 heartbeat_counter; 738 + }; 739 + 740 + struct pqi_config_table_soft_reset { 741 + struct pqi_config_table_section_header header; 742 + u8 soft_reset_status; 743 + }; 744 + 745 + #define PQI_SOFT_RESET_INITIATE 0x1 746 + #define PQI_SOFT_RESET_ABORT 0x2 747 + 748 + enum pqi_soft_reset_status { 749 + RESET_INITIATE_FIRMWARE, 750 + RESET_INITIATE_DRIVER, 751 + RESET_ABORT, 752 + RESET_NORESPONSE, 753 + RESET_TIMEDOUT 780 754 }; 781 755 782 756 union pqi_reset_register { ··· 1058 1000 struct list_head request_list_entry; 1059 1001 }; 1060 1002 1061 - #define PQI_NUM_SUPPORTED_EVENTS 6 1003 + #define PQI_NUM_SUPPORTED_EVENTS 7 1062 1004 1063 1005 struct pqi_event { 1064 1006 bool pending; 1065 1007 u8 event_type; 1066 1008 __le16 event_id; 1067 1009 __le32 additional_event_id; 1010 + __le32 ofa_bytes_requested; 1011 + __le16 ofa_cancel_reason; 1068 1012 }; 1069 1013 1070 1014 #define PQI_RESERVED_IO_SLOTS_LUN_RESET 1 ··· 1127 1067 1128 1068 struct mutex scan_mutex; 1129 1069 struct mutex 
lun_reset_mutex; 1070 + struct mutex ofa_mutex; /* serialize ofa */ 1130 1071 bool controller_online; 1131 1072 bool block_requests; 1132 1073 bool in_shutdown; 1074 + bool in_ofa; 1133 1075 u8 inbound_spanning_supported : 1; 1134 1076 u8 outbound_spanning_supported : 1; 1135 1077 u8 pqi_mode_enabled : 1; 1136 1078 u8 pqi_reset_quiesce_supported : 1; 1079 + u8 soft_reset_handshake_supported : 1; 1137 1080 1138 1081 struct list_head scsi_device_list; 1139 1082 spinlock_t scsi_device_list_lock; ··· 1157 1094 int previous_num_interrupts; 1158 1095 u32 previous_heartbeat_count; 1159 1096 __le32 __iomem *heartbeat_counter; 1097 + u8 __iomem *soft_reset_status; 1160 1098 struct timer_list heartbeat_timer; 1161 1099 struct work_struct ctrl_offline_work; 1162 1100 ··· 1169 1105 struct list_head raid_bypass_retry_list; 1170 1106 spinlock_t raid_bypass_retry_list_lock; 1171 1107 struct work_struct raid_bypass_retry_work; 1108 + 1109 + struct pqi_ofa_memory *pqi_ofa_mem_virt_addr; 1110 + dma_addr_t pqi_ofa_mem_dma_handle; 1111 + void **pqi_ofa_chunk_virt_addr; 1172 1112 }; 1173 1113 1174 1114 enum pqi_ctrl_mode {
+551 -36
drivers/scsi/smartpqi/smartpqi_init.c
··· 74 74 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 75 75 unsigned int cdb_length, struct pqi_queue_group *queue_group, 76 76 struct pqi_encryption_info *encryption_info, bool raid_bypass); 77 + static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info); 78 + static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info); 79 + static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info); 80 + static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info, 81 + u32 bytes_requested); 82 + static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info); 83 + static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info); 77 84 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 78 85 struct pqi_scsi_dev *device, unsigned long timeout_secs); 79 86 ··· 122 115 PQI_EVENT_TYPE_HARDWARE, 123 116 PQI_EVENT_TYPE_PHYSICAL_DEVICE, 124 117 PQI_EVENT_TYPE_LOGICAL_DEVICE, 118 + PQI_EVENT_TYPE_OFA, 125 119 PQI_EVENT_TYPE_AIO_STATE_CHANGE, 126 120 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE, 127 121 }; ··· 300 292 return device->in_reset; 301 293 } 302 294 295 + static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info) 296 + { 297 + ctrl_info->in_ofa = true; 298 + } 299 + 300 + static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info) 301 + { 302 + ctrl_info->in_ofa = false; 303 + } 304 + 305 + static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info) 306 + { 307 + return ctrl_info->in_ofa; 308 + } 309 + 303 310 static inline void pqi_device_remove_start(struct pqi_scsi_dev *device) 304 311 { 305 312 device->in_remove = true; ··· 331 308 { 332 309 if (pqi_ctrl_offline(ctrl_info)) 333 310 return; 311 + if (pqi_ctrl_in_ofa(ctrl_info)) 312 + return; 334 313 335 314 schedule_delayed_work(&ctrl_info->rescan_work, delay); 336 315 } ··· 342 317 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0); 343 318 } 344 319 345 - #define PQI_RESCAN_WORK_DELAY (10 * HZ) 320 + #define PQI_RESCAN_WORK_DELAY (10 
* PQI_HZ) 346 321 347 322 static inline void pqi_schedule_rescan_worker_delayed( 348 323 struct pqi_ctrl_info *ctrl_info) ··· 361 336 return 0; 362 337 363 338 return readl(ctrl_info->heartbeat_counter); 339 + } 340 + 341 + static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info) 342 + { 343 + if (!ctrl_info->soft_reset_status) 344 + return 0; 345 + 346 + return readb(ctrl_info->soft_reset_status); 347 + } 348 + 349 + static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info, 350 + u8 clear) 351 + { 352 + u8 status; 353 + 354 + if (!ctrl_info->soft_reset_status) 355 + return; 356 + 357 + status = pqi_read_soft_reset_status(ctrl_info); 358 + status &= ~clear; 359 + writeb(status, ctrl_info->soft_reset_status); 364 360 } 365 361 366 362 static int pqi_map_single(struct pci_dev *pci_dev, ··· 892 846 return rc; 893 847 } 894 848 895 - #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ) 849 + #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * PQI_HZ) 896 850 897 851 static void pqi_update_time_worker(struct work_struct *work) 898 852 { ··· 1860 1814 1861 1815 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 1862 1816 1817 + if (pqi_ctrl_in_ofa(ctrl_info)) 1818 + pqi_ctrl_ofa_done(ctrl_info); 1819 + 1863 1820 /* Remove all devices that have gone away. */ 1864 1821 list_for_each_entry_safe(device, next, &delete_list, 1865 1822 delete_list_entry) { ··· 2207 2158 2208 2159 static void pqi_scan_start(struct Scsi_Host *shost) 2209 2160 { 2210 - pqi_scan_scsi_devices(shost_to_hba(shost)); 2161 + struct pqi_ctrl_info *ctrl_info; 2162 + 2163 + ctrl_info = shost_to_hba(shost); 2164 + if (pqi_ctrl_in_ofa(ctrl_info)) 2165 + return; 2166 + 2167 + pqi_scan_scsi_devices(ctrl_info); 2211 2168 } 2212 2169 2213 2170 /* Returns TRUE if scan is finished. 
*/ ··· 2238 2183 { 2239 2184 mutex_lock(&ctrl_info->lun_reset_mutex); 2240 2185 mutex_unlock(&ctrl_info->lun_reset_mutex); 2186 + } 2187 + 2188 + static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) 2189 + { 2190 + mutex_lock(&ctrl_info->ofa_mutex); 2191 + mutex_unlock(&ctrl_info->ofa_mutex); 2241 2192 } 2242 2193 2243 2194 static inline void pqi_set_encryption_info( ··· 2622 2561 u8 status; 2623 2562 2624 2563 pqi_registers = ctrl_info->pqi_registers; 2625 - timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies; 2564 + timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies; 2626 2565 2627 2566 while (1) { 2628 2567 signature = readq(&pqi_registers->signature); ··· 3061 3000 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); 3062 3001 } 3063 3002 3003 + #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30 3004 + #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1 3005 + 3006 + static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( 3007 + struct pqi_ctrl_info *ctrl_info) 3008 + { 3009 + unsigned long timeout; 3010 + u8 status; 3011 + 3012 + timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies; 3013 + 3014 + while (1) { 3015 + status = pqi_read_soft_reset_status(ctrl_info); 3016 + if (status & PQI_SOFT_RESET_INITIATE) 3017 + return RESET_INITIATE_DRIVER; 3018 + 3019 + if (status & PQI_SOFT_RESET_ABORT) 3020 + return RESET_ABORT; 3021 + 3022 + if (time_after(jiffies, timeout)) { 3023 + dev_err(&ctrl_info->pci_dev->dev, 3024 + "timed out waiting for soft reset status\n"); 3025 + return RESET_TIMEDOUT; 3026 + } 3027 + 3028 + if (!sis_is_firmware_running(ctrl_info)) 3029 + return RESET_NORESPONSE; 3030 + 3031 + ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS); 3032 + } 3033 + } 3034 + 3035 + static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info, 3036 + enum pqi_soft_reset_status reset_status) 3037 + { 3038 + int rc; 3039 + 3040 + switch (reset_status) { 3041 + case RESET_INITIATE_DRIVER: 3042 + /* fall through 
*/ 3043 + case RESET_TIMEDOUT: 3044 + dev_info(&ctrl_info->pci_dev->dev, 3045 + "resetting controller %u\n", ctrl_info->ctrl_id); 3046 + sis_soft_reset(ctrl_info); 3047 + /* fall through */ 3048 + case RESET_INITIATE_FIRMWARE: 3049 + rc = pqi_ofa_ctrl_restart(ctrl_info); 3050 + pqi_ofa_free_host_buffer(ctrl_info); 3051 + dev_info(&ctrl_info->pci_dev->dev, 3052 + "Online Firmware Activation for controller %u: %s\n", 3053 + ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED"); 3054 + break; 3055 + case RESET_ABORT: 3056 + pqi_ofa_ctrl_unquiesce(ctrl_info); 3057 + dev_info(&ctrl_info->pci_dev->dev, 3058 + "Online Firmware Activation for controller %u: %s\n", 3059 + ctrl_info->ctrl_id, "ABORTED"); 3060 + break; 3061 + case RESET_NORESPONSE: 3062 + pqi_ofa_free_host_buffer(ctrl_info); 3063 + pqi_take_ctrl_offline(ctrl_info); 3064 + break; 3065 + } 3066 + } 3067 + 3068 + static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, 3069 + struct pqi_event *event) 3070 + { 3071 + u16 event_id; 3072 + enum pqi_soft_reset_status status; 3073 + 3074 + event_id = get_unaligned_le16(&event->event_id); 3075 + 3076 + mutex_lock(&ctrl_info->ofa_mutex); 3077 + 3078 + if (event_id == PQI_EVENT_OFA_QUIESCE) { 3079 + dev_info(&ctrl_info->pci_dev->dev, 3080 + "Received Online Firmware Activation quiesce event for controller %u\n", 3081 + ctrl_info->ctrl_id); 3082 + pqi_ofa_ctrl_quiesce(ctrl_info); 3083 + pqi_acknowledge_event(ctrl_info, event); 3084 + if (ctrl_info->soft_reset_handshake_supported) { 3085 + status = pqi_poll_for_soft_reset_status(ctrl_info); 3086 + pqi_process_soft_reset(ctrl_info, status); 3087 + } else { 3088 + pqi_process_soft_reset(ctrl_info, 3089 + RESET_INITIATE_FIRMWARE); 3090 + } 3091 + 3092 + } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) { 3093 + pqi_acknowledge_event(ctrl_info, event); 3094 + pqi_ofa_setup_host_buffer(ctrl_info, 3095 + le32_to_cpu(event->ofa_bytes_requested)); 3096 + pqi_ofa_host_memory_update(ctrl_info); 3097 + } else if 
(event_id == PQI_EVENT_OFA_CANCELLED) { 3098 + pqi_ofa_free_host_buffer(ctrl_info); 3099 + pqi_acknowledge_event(ctrl_info, event); 3100 + dev_info(&ctrl_info->pci_dev->dev, 3101 + "Online Firmware Activation(%u) cancel reason : %u\n", 3102 + ctrl_info->ctrl_id, event->ofa_cancel_reason); 3103 + } 3104 + 3105 + mutex_unlock(&ctrl_info->ofa_mutex); 3106 + } 3107 + 3064 3108 static void pqi_event_worker(struct work_struct *work) 3065 3109 { 3066 3110 unsigned int i; ··· 3185 3019 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { 3186 3020 if (event->pending) { 3187 3021 event->pending = false; 3022 + if (event->event_type == PQI_EVENT_TYPE_OFA) { 3023 + pqi_ctrl_unbusy(ctrl_info); 3024 + pqi_ofa_process_event(ctrl_info, event); 3025 + return; 3026 + } 3188 3027 pqi_acknowledge_event(ctrl_info, event); 3189 3028 } 3190 3029 event++; ··· 3199 3028 pqi_ctrl_unbusy(ctrl_info); 3200 3029 } 3201 3030 3202 - #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) 3031 + #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ) 3203 3032 3204 3033 static void pqi_heartbeat_timer_handler(struct timer_list *t) 3205 3034 { ··· 3268 3097 return pqi_event_type_to_event_index(event_type) != -1; 3269 3098 } 3270 3099 3100 + static void pqi_ofa_capture_event_payload(struct pqi_event *event, 3101 + struct pqi_event_response *response) 3102 + { 3103 + u16 event_id; 3104 + 3105 + event_id = get_unaligned_le16(&event->event_id); 3106 + 3107 + if (event->event_type == PQI_EVENT_TYPE_OFA) { 3108 + if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) { 3109 + event->ofa_bytes_requested = 3110 + response->data.ofa_memory_allocation.bytes_requested; 3111 + } else if (event_id == PQI_EVENT_OFA_CANCELLED) { 3112 + event->ofa_cancel_reason = 3113 + response->data.ofa_cancelled.reason; 3114 + } 3115 + } 3116 + } 3117 + 3271 3118 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) 3272 3119 { 3273 3120 unsigned int num_events; ··· 3320 3131 event->event_id = response->event_id; 3321 3132 
event->additional_event_id = 3322 3133 response->additional_event_id; 3134 + pqi_ofa_capture_event_payload(event, response); 3323 3135 } 3324 3136 } 3325 3137 ··· 3754 3564 return 0; 3755 3565 } 3756 3566 3757 - #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ 3567 + #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ 3758 3568 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 3759 3569 3760 3570 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) ··· 3847 3657 admin_queues = &ctrl_info->admin_queues; 3848 3658 oq_ci = admin_queues->oq_ci_copy; 3849 3659 3850 - timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; 3660 + timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies; 3851 3661 3852 3662 while (1) { 3853 3663 oq_pi = readl(admin_queues->oq_pi); ··· 3962 3772 3963 3773 while (1) { 3964 3774 if (wait_for_completion_io_timeout(wait, 3965 - PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) { 3775 + PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) { 3966 3776 rc = 0; 3967 3777 break; 3968 3778 } ··· 5335 5145 } 5336 5146 5337 5147 pqi_ctrl_busy(ctrl_info); 5338 - if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) { 5148 + if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) || 5149 + pqi_ctrl_in_ofa(ctrl_info)) { 5339 5150 rc = SCSI_MLQUEUE_HOST_BUSY; 5340 5151 goto out; 5341 5152 } ··· 5481 5290 } 5482 5291 } 5483 5292 5293 + static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info) 5294 + { 5295 + unsigned int i; 5296 + unsigned int path; 5297 + struct pqi_queue_group *queue_group; 5298 + unsigned long flags; 5299 + struct pqi_io_request *io_request; 5300 + struct pqi_io_request *next; 5301 + struct scsi_cmnd *scmd; 5302 + 5303 + for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5304 + queue_group = &ctrl_info->queue_groups[i]; 5305 + 5306 + for (path = 0; path < 2; path++) { 5307 + spin_lock_irqsave(&queue_group->submit_lock[path], 5308 + flags); 5309 + 5310 + 
list_for_each_entry_safe(io_request, next, 5311 + &queue_group->request_list[path], 5312 + request_list_entry) { 5313 + 5314 + scmd = io_request->scmd; 5315 + if (!scmd) 5316 + continue; 5317 + 5318 + list_del(&io_request->request_list_entry); 5319 + set_host_byte(scmd, DID_RESET); 5320 + pqi_scsi_done(scmd); 5321 + } 5322 + 5323 + spin_unlock_irqrestore( 5324 + &queue_group->submit_lock[path], flags); 5325 + } 5326 + } 5327 + } 5328 + 5484 5329 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 5485 5330 struct pqi_scsi_dev *device, unsigned long timeout_secs) 5486 5331 { 5487 5332 unsigned long timeout; 5488 5333 5489 - timeout = (timeout_secs * HZ) + jiffies; 5334 + timeout = (timeout_secs * PQI_HZ) + jiffies; 5490 5335 5491 5336 while (atomic_read(&device->scsi_cmds_outstanding)) { 5492 5337 pqi_check_ctrl_health(ctrl_info); ··· 5541 5314 return 0; 5542 5315 } 5543 5316 5544 - static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info) 5317 + static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 5318 + unsigned long timeout_secs) 5545 5319 { 5546 5320 bool io_pending; 5547 5321 unsigned long flags; 5322 + unsigned long timeout; 5548 5323 struct pqi_scsi_dev *device; 5549 5324 5325 + timeout = (timeout_secs * PQI_HZ) + jiffies; 5550 5326 while (1) { 5551 5327 io_pending = false; 5552 5328 ··· 5571 5341 if (pqi_ctrl_offline(ctrl_info)) 5572 5342 return -ENXIO; 5573 5343 5344 + if (timeout_secs != NO_TIMEOUT) { 5345 + if (time_after(jiffies, timeout)) { 5346 + dev_err(&ctrl_info->pci_dev->dev, 5347 + "timed out waiting for pending IO\n"); 5348 + return -ETIMEDOUT; 5349 + } 5350 + } 5574 5351 usleep_range(1000, 2000); 5575 5352 } 5576 5353 ··· 5601 5364 5602 5365 while (1) { 5603 5366 if (wait_for_completion_io_timeout(wait, 5604 - PQI_LUN_RESET_TIMEOUT_SECS * HZ)) { 5367 + PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) { 5605 5368 rc = 0; 5606 5369 break; 5607 5370 } ··· 5656 5419 #define 
PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000 5657 5420 /* Performs a reset at the LUN level. */ 5658 5421 5659 - static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5422 + static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5660 5423 struct pqi_scsi_dev *device) 5661 5424 { 5662 5425 int rc; 5663 5426 unsigned int retries; 5427 + unsigned long timeout_secs; 5664 5428 5665 5429 for (retries = 0;;) { 5666 5430 rc = pqi_lun_reset(ctrl_info, device); ··· 5670 5432 break; 5671 5433 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); 5672 5434 } 5673 - if (rc == 0) 5674 - rc = pqi_device_wait_for_pending_io(ctrl_info, 5675 - device, NO_TIMEOUT); 5435 + timeout_secs = rc ? PQI_LUN_RESET_TIMEOUT_SECS : NO_TIMEOUT; 5436 + 5437 + rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs); 5676 5438 5677 5439 return rc == 0 ? SUCCESS : FAILED; 5440 + } 5441 + 5442 + static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5443 + struct pqi_scsi_dev *device) 5444 + { 5445 + int rc; 5446 + 5447 + mutex_lock(&ctrl_info->lun_reset_mutex); 5448 + 5449 + pqi_ctrl_block_requests(ctrl_info); 5450 + pqi_ctrl_wait_until_quiesced(ctrl_info); 5451 + pqi_fail_io_queued_for_device(ctrl_info, device); 5452 + rc = pqi_wait_until_inbound_queues_empty(ctrl_info); 5453 + pqi_device_reset_start(device); 5454 + pqi_ctrl_unblock_requests(ctrl_info); 5455 + 5456 + if (rc) 5457 + rc = FAILED; 5458 + else 5459 + rc = _pqi_device_reset(ctrl_info, device); 5460 + 5461 + pqi_device_reset_done(device); 5462 + 5463 + mutex_unlock(&ctrl_info->lun_reset_mutex); 5464 + return rc; 5678 5465 } 5679 5466 5680 5467 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) ··· 5719 5456 5720 5457 pqi_check_ctrl_health(ctrl_info); 5721 5458 if (pqi_ctrl_offline(ctrl_info)) { 5459 + dev_err(&ctrl_info->pci_dev->dev, 5460 + "controller %u offlined - cannot send device reset\n", 5461 + ctrl_info->ctrl_id); 5722 5462 rc = FAILED; 5723 5463 goto out; 5724 5464 } 5725 5465 5726 - 
mutex_lock(&ctrl_info->lun_reset_mutex); 5466 + pqi_wait_until_ofa_finished(ctrl_info); 5727 5467 5728 - pqi_ctrl_block_requests(ctrl_info); 5729 - pqi_ctrl_wait_until_quiesced(ctrl_info); 5730 - pqi_fail_io_queued_for_device(ctrl_info, device); 5731 - rc = pqi_wait_until_inbound_queues_empty(ctrl_info); 5732 - pqi_device_reset_start(device); 5733 - pqi_ctrl_unblock_requests(ctrl_info); 5734 - 5735 - if (rc) 5736 - rc = FAILED; 5737 - else 5738 - rc = pqi_device_reset(ctrl_info, device); 5739 - 5740 - pqi_device_reset_done(device); 5741 - 5742 - mutex_unlock(&ctrl_info->lun_reset_mutex); 5743 - 5468 + rc = pqi_device_reset(ctrl_info, device); 5744 5469 out: 5745 5470 dev_err(&ctrl_info->pci_dev->dev, 5746 5471 "reset of scsi %d:%d:%d:%d: %s\n", ··· 6046 5795 struct pqi_ctrl_info *ctrl_info; 6047 5796 6048 5797 ctrl_info = shost_to_hba(sdev->host); 5798 + 5799 + if (pqi_ctrl_in_ofa(ctrl_info)) 5800 + return -EBUSY; 6049 5801 6050 5802 switch (cmd) { 6051 5803 case CCISS_DEREGDISK: ··· 6711 6457 .feature_bit = PQI_FIRMWARE_FEATURE_SMP, 6712 6458 .feature_status = pqi_firmware_feature_status, 6713 6459 }, 6460 + { 6461 + .feature_name = "New Soft Reset Handshake", 6462 + .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, 6463 + .feature_status = pqi_firmware_feature_status, 6464 + }, 6714 6465 }; 6715 6466 6716 6467 static void pqi_process_firmware_features( ··· 6768 6509 return; 6769 6510 } 6770 6511 6512 + ctrl_info->soft_reset_handshake_supported = false; 6771 6513 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6772 6514 if (!pqi_firmware_features[i].supported) 6773 6515 continue; 6774 6516 if (pqi_is_firmware_feature_enabled(firmware_features, 6775 6517 firmware_features_iomem_addr, 6776 - pqi_firmware_features[i].feature_bit)) 6518 + pqi_firmware_features[i].feature_bit)) { 6777 6519 pqi_firmware_features[i].enabled = true; 6520 + if (pqi_firmware_features[i].feature_bit == 6521 + PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE) 6522 + 
ctrl_info->soft_reset_handshake_supported = 6523 + true; 6524 + } 6778 6525 pqi_firmware_feature_update(ctrl_info, 6779 6526 &pqi_firmware_features[i]); 6780 6527 } ··· 6860 6595 offsetof( 6861 6596 struct pqi_config_table_heartbeat, 6862 6597 heartbeat_counter); 6598 + break; 6599 + case PQI_CONFIG_TABLE_SECTION_SOFT_RESET: 6600 + ctrl_info->soft_reset_status = 6601 + table_iomem_addr + 6602 + section_offset + 6603 + offsetof(struct pqi_config_table_soft_reset, 6604 + soft_reset_status); 6863 6605 break; 6864 6606 } 6865 6607 ··· 7151 6879 return rc; 7152 6880 7153 6881 /* 6882 + * Get the controller properties. This allows us to determine 6883 + * whether or not it supports PQI mode. 6884 + */ 6885 + rc = sis_get_ctrl_properties(ctrl_info); 6886 + if (rc) { 6887 + dev_err(&ctrl_info->pci_dev->dev, 6888 + "error obtaining controller properties\n"); 6889 + return rc; 6890 + } 6891 + 6892 + rc = sis_get_pqi_capabilities(ctrl_info); 6893 + if (rc) { 6894 + dev_err(&ctrl_info->pci_dev->dev, 6895 + "error obtaining controller capabilities\n"); 6896 + return rc; 6897 + } 6898 + 6899 + /* 7154 6900 * If the function we are about to call succeeds, the 7155 6901 * controller will transition from legacy SIS mode 7156 6902 * into PQI mode. 
··· 7208 6918 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 7209 6919 7210 6920 ctrl_info->controller_online = true; 7211 - pqi_start_heartbeat_timer(ctrl_info); 7212 6921 pqi_ctrl_unblock_requests(ctrl_info); 6922 + 6923 + rc = pqi_process_config_table(ctrl_info); 6924 + if (rc) 6925 + return rc; 6926 + 6927 + pqi_start_heartbeat_timer(ctrl_info); 7213 6928 7214 6929 rc = pqi_enable_events(ctrl_info); 7215 6930 if (rc) { 7216 6931 dev_err(&ctrl_info->pci_dev->dev, 7217 6932 "error enabling events\n"); 6933 + return rc; 6934 + } 6935 + 6936 + rc = pqi_get_ctrl_firmware_version(ctrl_info); 6937 + if (rc) { 6938 + dev_err(&ctrl_info->pci_dev->dev, 6939 + "error obtaining firmware version\n"); 7218 6940 return rc; 7219 6941 } 7220 6942 ··· 7347 7045 7348 7046 mutex_init(&ctrl_info->scan_mutex); 7349 7047 mutex_init(&ctrl_info->lun_reset_mutex); 7048 + mutex_init(&ctrl_info->ofa_mutex); 7350 7049 7351 7050 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); 7352 7051 spin_lock_init(&ctrl_info->scsi_device_list_lock); ··· 7422 7119 if (ctrl_info->pqi_mode_enabled) 7423 7120 pqi_revert_to_sis_mode(ctrl_info); 7424 7121 pqi_free_ctrl_resources(ctrl_info); 7122 + } 7123 + 7124 + static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) 7125 + { 7126 + pqi_cancel_update_time_worker(ctrl_info); 7127 + pqi_cancel_rescan_worker(ctrl_info); 7128 + pqi_wait_until_lun_reset_finished(ctrl_info); 7129 + pqi_wait_until_scan_finished(ctrl_info); 7130 + pqi_ctrl_ofa_start(ctrl_info); 7131 + pqi_ctrl_block_requests(ctrl_info); 7132 + pqi_ctrl_wait_until_quiesced(ctrl_info); 7133 + pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS); 7134 + pqi_fail_io_queued_for_all_devices(ctrl_info); 7135 + pqi_wait_until_inbound_queues_empty(ctrl_info); 7136 + pqi_stop_heartbeat_timer(ctrl_info); 7137 + ctrl_info->pqi_mode_enabled = false; 7138 + pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 7139 + } 7140 + 7141 + static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) 
7142 + { 7143 + pqi_ofa_free_host_buffer(ctrl_info); 7144 + ctrl_info->pqi_mode_enabled = true; 7145 + pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 7146 + ctrl_info->controller_online = true; 7147 + pqi_ctrl_unblock_requests(ctrl_info); 7148 + pqi_start_heartbeat_timer(ctrl_info); 7149 + pqi_schedule_update_time_worker(ctrl_info); 7150 + pqi_clear_soft_reset_status(ctrl_info, 7151 + PQI_SOFT_RESET_ABORT); 7152 + pqi_scan_scsi_devices(ctrl_info); 7153 + } 7154 + 7155 + static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, 7156 + u32 total_size, u32 chunk_size) 7157 + { 7158 + u32 sg_count; 7159 + u32 size; 7160 + int i; 7161 + struct pqi_sg_descriptor *mem_descriptor = NULL; 7162 + struct device *dev; 7163 + struct pqi_ofa_memory *ofap; 7164 + 7165 + dev = &ctrl_info->pci_dev->dev; 7166 + 7167 + sg_count = (total_size + chunk_size - 1); 7168 + do_div(sg_count, chunk_size); 7169 + 7170 + ofap = ctrl_info->pqi_ofa_mem_virt_addr; 7171 + 7172 + if (sg_count*chunk_size < total_size) 7173 + goto out; 7174 + 7175 + ctrl_info->pqi_ofa_chunk_virt_addr = 7176 + kcalloc(sg_count, sizeof(void *), GFP_KERNEL); 7177 + if (!ctrl_info->pqi_ofa_chunk_virt_addr) 7178 + goto out; 7179 + 7180 + for (size = 0, i = 0; size < total_size; size += chunk_size, i++) { 7181 + dma_addr_t dma_handle; 7182 + 7183 + ctrl_info->pqi_ofa_chunk_virt_addr[i] = 7184 + dma_zalloc_coherent(dev, chunk_size, &dma_handle, 7185 + GFP_KERNEL); 7186 + 7187 + if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) 7188 + break; 7189 + 7190 + mem_descriptor = &ofap->sg_descriptor[i]; 7191 + put_unaligned_le64 ((u64) dma_handle, &mem_descriptor->address); 7192 + put_unaligned_le32 (chunk_size, &mem_descriptor->length); 7193 + } 7194 + 7195 + if (!size || size < total_size) 7196 + goto out_free_chunks; 7197 + 7198 + put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); 7199 + put_unaligned_le16(sg_count, &ofap->num_memory_descriptors); 7200 + put_unaligned_le32(size, &ofap->bytes_allocated); 7201 + 7202 + return 0; 
7203 + 7204 + out_free_chunks: 7205 + while (--i >= 0) { 7206 + mem_descriptor = &ofap->sg_descriptor[i]; 7207 + dma_free_coherent(dev, chunk_size, 7208 + ctrl_info->pqi_ofa_chunk_virt_addr[i], 7209 + get_unaligned_le64(&mem_descriptor->address)); 7210 + } 7211 + kfree(ctrl_info->pqi_ofa_chunk_virt_addr); 7212 + 7213 + out: 7214 + put_unaligned_le32 (0, &ofap->bytes_allocated); 7215 + return -ENOMEM; 7216 + } 7217 + 7218 + static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) 7219 + { 7220 + u32 total_size; 7221 + u32 min_chunk_size; 7222 + u32 chunk_sz; 7223 + 7224 + total_size = le32_to_cpu( 7225 + ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated); 7226 + min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS; 7227 + 7228 + for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2) 7229 + if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz)) 7230 + return 0; 7231 + 7232 + return -ENOMEM; 7233 + } 7234 + 7235 + static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info, 7236 + u32 bytes_requested) 7237 + { 7238 + struct pqi_ofa_memory *pqi_ofa_memory; 7239 + struct device *dev; 7240 + 7241 + dev = &ctrl_info->pci_dev->dev; 7242 + pqi_ofa_memory = dma_zalloc_coherent(dev, 7243 + PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, 7244 + &ctrl_info->pqi_ofa_mem_dma_handle, 7245 + GFP_KERNEL); 7246 + 7247 + if (!pqi_ofa_memory) 7248 + return; 7249 + 7250 + put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version); 7251 + memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE, 7252 + sizeof(pqi_ofa_memory->signature)); 7253 + pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested); 7254 + 7255 + ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory; 7256 + 7257 + if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) { 7258 + dev_err(dev, "Failed to allocate host buffer of size = %u", 7259 + bytes_requested); 7260 + } 7261 + } 7262 + 7263 + static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) 7264 + { 7265 + int i; 7266 + struct 
pqi_sg_descriptor *mem_descriptor; 7267 + struct pqi_ofa_memory *ofap; 7268 + 7269 + ofap = ctrl_info->pqi_ofa_mem_virt_addr; 7270 + 7271 + if (!ofap) 7272 + return; 7273 + 7274 + if (!ofap->bytes_allocated) 7275 + goto out; 7276 + 7277 + mem_descriptor = ofap->sg_descriptor; 7278 + 7279 + for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors); 7280 + i++) { 7281 + dma_free_coherent(&ctrl_info->pci_dev->dev, 7282 + get_unaligned_le32(&mem_descriptor[i].length), 7283 + ctrl_info->pqi_ofa_chunk_virt_addr[i], 7284 + get_unaligned_le64(&mem_descriptor[i].address)); 7285 + } 7286 + kfree(ctrl_info->pqi_ofa_chunk_virt_addr); 7287 + 7288 + out: 7289 + dma_free_coherent(&ctrl_info->pci_dev->dev, 7290 + PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap, 7291 + ctrl_info->pqi_ofa_mem_dma_handle); 7292 + ctrl_info->pqi_ofa_mem_virt_addr = NULL; 7293 + } 7294 + 7295 + static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) 7296 + { 7297 + struct pqi_vendor_general_request request; 7298 + size_t size; 7299 + struct pqi_ofa_memory *ofap; 7300 + 7301 + memset(&request, 0, sizeof(request)); 7302 + 7303 + ofap = ctrl_info->pqi_ofa_mem_virt_addr; 7304 + 7305 + request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 7306 + put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 7307 + &request.header.iu_length); 7308 + put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE, 7309 + &request.function_code); 7310 + 7311 + if (ofap) { 7312 + size = offsetof(struct pqi_ofa_memory, sg_descriptor) + 7313 + get_unaligned_le16(&ofap->num_memory_descriptors) * 7314 + sizeof(struct pqi_sg_descriptor); 7315 + 7316 + put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle, 7317 + &request.data.ofa_memory_allocation.buffer_address); 7318 + put_unaligned_le32(size, 7319 + &request.data.ofa_memory_allocation.buffer_length); 7320 + 7321 + } 7322 + 7323 + return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 7324 + 0, NULL, NO_TIMEOUT); 7325 + } 7326 + 7327 
+ #define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000 7328 + 7329 + static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info) 7330 + { 7331 + msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY); 7332 + return pqi_ctrl_init_resume(ctrl_info); 7425 7333 } 7426 7334 7427 7335 static void pqi_perform_lockup_action(void) ··· 7854 7340 pqi_cancel_rescan_worker(ctrl_info); 7855 7341 pqi_wait_until_scan_finished(ctrl_info); 7856 7342 pqi_wait_until_lun_reset_finished(ctrl_info); 7343 + pqi_wait_until_ofa_finished(ctrl_info); 7857 7344 pqi_flush_cache(ctrl_info, SUSPEND); 7858 7345 pqi_ctrl_block_requests(ctrl_info); 7859 7346 pqi_ctrl_wait_until_quiesced(ctrl_info); 7860 7347 pqi_wait_until_inbound_queues_empty(ctrl_info); 7861 - pqi_ctrl_wait_for_pending_io(ctrl_info); 7348 + pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT); 7862 7349 pqi_stop_heartbeat_timer(ctrl_info); 7863 7350 7864 7351 if (state.event == PM_EVENT_FREEZE)
+10 -3
drivers/scsi/smartpqi/smartpqi_sis.c
··· 34 34 #define SIS_REENABLE_SIS_MODE 0x1 35 35 #define SIS_ENABLE_MSIX 0x40 36 36 #define SIS_ENABLE_INTX 0x80 37 + #define SIS_SOFT_RESET 0x100 37 38 #define SIS_CMD_READY 0x200 38 39 #define SIS_TRIGGER_SHUTDOWN 0x800000 39 40 #define SIS_PQI_RESET_QUIESCE 0x1000000 ··· 91 90 unsigned long timeout; 92 91 u32 status; 93 92 94 - timeout = (timeout_secs * HZ) + jiffies; 93 + timeout = (timeout_secs * PQI_HZ) + jiffies; 95 94 96 95 while (1) { 97 96 status = readl(&ctrl_info->registers->sis_firmware_status); ··· 203 202 * the top of the loop in order to give the controller time to start 204 203 * processing the command before we start polling. 205 204 */ 206 - timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * HZ) + jiffies; 205 + timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * PQI_HZ) + jiffies; 207 206 while (1) { 208 207 msleep(SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS); 209 208 doorbell = readl(&registers->sis_ctrl_to_host_doorbell); ··· 349 348 u32 doorbell_register; 350 349 unsigned long timeout; 351 350 352 - timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * HZ) + jiffies; 351 + timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * PQI_HZ) + jiffies; 353 352 354 353 while (1) { 355 354 doorbell_register = ··· 419 418 u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info) 420 419 { 421 420 return readl(&ctrl_info->registers->sis_driver_scratch); 421 + } 422 + 423 + void sis_soft_reset(struct pqi_ctrl_info *ctrl_info) 424 + { 425 + writel(SIS_SOFT_RESET, 426 + &ctrl_info->registers->sis_host_to_ctrl_doorbell); 422 427 } 423 428 424 429 static void __attribute__((unused)) verify_structures(void)
+1
drivers/scsi/smartpqi/smartpqi_sis.h
··· 33 33 int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info); 34 34 void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value); 35 35 u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info); 36 + void sis_soft_reset(struct pqi_ctrl_info *ctrl_info); 36 37 37 38 #endif /* _SMARTPQI_SIS_H */