Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

isci: move core/controller to host

Now that the data structures are unified, unify the implementation in
host.[ch] and clean up namespace pollution.

Reported-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

+3361 -3941
-1
drivers/scsi/isci/Makefile
··· 5 5 host.o task.o probe_roms.o \ 6 6 remote_node_context.o \ 7 7 remote_node_table.o \ 8 - core/scic_sds_controller.o \ 9 8 core/scic_sds_request.o \ 10 9 core/scic_sds_stp_request.o \ 11 10 core/scic_sds_port.o \
drivers/scsi/isci/core/sci_pool.h drivers/scsi/isci/pool.h
-1
drivers/scsi/isci/core/sci_util.c
··· 55 55 56 56 #include <linux/kernel.h> 57 57 #include "sci_util.h" 58 - #include "sci_environment.h" 59 58 #include "request.h" 60 59 61 60 void *scic_request_get_virt_addr(struct scic_sds_request *sci_req, dma_addr_t phys_addr)
-38
drivers/scsi/isci/core/scic_config_parameters.h
··· 229 229 struct scic_sds_oem_params sds1; 230 230 }; 231 231 232 - /** 233 - * scic_user_parameters_set() - This method allows the user to attempt to 234 - * change the user parameters utilized by the controller. 235 - * @controller: This parameter specifies the controller on which to set the 236 - * user parameters. 237 - * @user_parameters: This parameter specifies the USER_PARAMETERS object 238 - * containing the potential new values. 239 - * 240 - * Indicate if the update of the user parameters was successful. SCI_SUCCESS 241 - * This value is returned if the operation succeeded. SCI_FAILURE_INVALID_STATE 242 - * This value is returned if the attempt to change the user parameter failed, 243 - * because changing one of the parameters is not currently allowed. 244 - * SCI_FAILURE_INVALID_PARAMETER_VALUE This value is returned if the user 245 - * supplied an invalid interrupt coalescence time, spin up delay interval, etc. 246 - */ 247 - enum sci_status scic_user_parameters_set( 248 - struct scic_sds_controller *controller, 249 - union scic_user_parameters *user_parameters); 250 - 251 - /** 252 - * scic_oem_parameters_set() - This method allows the user to attempt to change 253 - * the OEM parameters utilized by the controller. 254 - * @controller: This parameter specifies the controller on which to set the 255 - * user parameters. 256 - * @oem_parameters: This parameter specifies the OEM parameters object 257 - * containing the potential new values. 258 - * 259 - * Indicate if the update of the user parameters was successful. SCI_SUCCESS 260 - * This value is returned if the operation succeeded. SCI_FAILURE_INVALID_STATE 261 - * This value is returned if the attempt to change the user parameter failed, 262 - * because changing one of the parameters is not currently allowed. 263 - * SCI_FAILURE_INVALID_PARAMETER_VALUE This value is returned if the user 264 - * supplied an unsupported value for one of the OEM parameters. 
265 - */ 266 - enum sci_status scic_oem_parameters_set( 267 - struct scic_sds_controller *controller, 268 - union scic_oem_parameters *oem_parameters); 269 - 270 232 int scic_oem_parameters_validate(struct scic_sds_oem_params *oem); 271 233 272 234 /**
-130
drivers/scsi/isci/core/scic_controller.h
··· 1 - /* 2 - * This file is provided under a dual BSD/GPLv2 license. When using or 3 - * redistributing this file, you may do so under either license. 4 - * 5 - * GPL LICENSE SUMMARY 6 - * 7 - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 8 - * 9 - * This program is free software; you can redistribute it and/or modify 10 - * it under the terms of version 2 of the GNU General Public License as 11 - * published by the Free Software Foundation. 12 - * 13 - * This program is distributed in the hope that it will be useful, but 14 - * WITHOUT ANY WARRANTY; without even the implied warranty of 15 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 - * General Public License for more details. 17 - * 18 - * You should have received a copy of the GNU General Public License 19 - * along with this program; if not, write to the Free Software 20 - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 21 - * The full GNU General Public License is included in this distribution 22 - * in the file called LICENSE.GPL. 23 - * 24 - * BSD LICENSE 25 - * 26 - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 27 - * All rights reserved. 28 - * 29 - * Redistribution and use in source and binary forms, with or without 30 - * modification, are permitted provided that the following conditions 31 - * are met: 32 - * 33 - * * Redistributions of source code must retain the above copyright 34 - * notice, this list of conditions and the following disclaimer. 35 - * * Redistributions in binary form must reproduce the above copyright 36 - * notice, this list of conditions and the following disclaimer in 37 - * the documentation and/or other materials provided with the 38 - * distribution. 39 - * * Neither the name of Intel Corporation nor the names of its 40 - * contributors may be used to endorse or promote products derived 41 - * from this software without specific prior written permission. 
42 - * 43 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 44 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 45 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 46 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 47 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 48 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 49 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 50 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 51 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 52 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 53 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 - */ 55 - 56 - #ifndef _SCIC_CONTROLLER_H_ 57 - #define _SCIC_CONTROLLER_H_ 58 - 59 - #include "scic_config_parameters.h" 60 - 61 - struct scic_sds_request; 62 - struct scic_sds_phy; 63 - struct scic_sds_port; 64 - struct scic_sds_remote_device; 65 - 66 - enum sci_status scic_controller_construct(struct scic_sds_controller *c, 67 - void __iomem *scu_base, 68 - void __iomem *smu_base); 69 - 70 - void scic_controller_enable_interrupts( 71 - struct scic_sds_controller *controller); 72 - 73 - void scic_controller_disable_interrupts( 74 - struct scic_sds_controller *controller); 75 - 76 - enum sci_status scic_controller_initialize( 77 - struct scic_sds_controller *controller); 78 - 79 - u32 scic_controller_get_suggested_start_timeout( 80 - struct scic_sds_controller *controller); 81 - 82 - enum sci_status scic_controller_start( 83 - struct scic_sds_controller *controller, 84 - u32 timeout); 85 - 86 - enum sci_status scic_controller_stop( 87 - struct scic_sds_controller *controller, 88 - u32 timeout); 89 - 90 - enum sci_status scic_controller_reset( 91 - struct scic_sds_controller *controller); 92 - 93 - enum sci_status scic_controller_start_io( 
94 - struct scic_sds_controller *controller, 95 - struct scic_sds_remote_device *remote_device, 96 - struct scic_sds_request *io_request, 97 - u16 io_tag); 98 - 99 - enum sci_task_status scic_controller_start_task( 100 - struct scic_sds_controller *controller, 101 - struct scic_sds_remote_device *remote_device, 102 - struct scic_sds_request *task_request, 103 - u16 io_tag); 104 - 105 - enum sci_status scic_controller_terminate_request( 106 - struct scic_sds_controller *controller, 107 - struct scic_sds_remote_device *remote_device, 108 - struct scic_sds_request *request); 109 - 110 - enum sci_status scic_controller_complete_io( 111 - struct scic_sds_controller *controller, 112 - struct scic_sds_remote_device *remote_device, 113 - struct scic_sds_request *io_request); 114 - 115 - enum sci_status scic_controller_get_phy_handle( 116 - struct scic_sds_controller *controller, 117 - u8 phy_index, 118 - struct scic_sds_phy **phy_handle); 119 - 120 - u16 scic_controller_allocate_io_tag( 121 - struct scic_sds_controller *controller); 122 - 123 - enum sci_status scic_controller_free_io_tag( 124 - struct scic_sds_controller *controller, 125 - u16 io_tag); 126 - 127 - struct device; 128 - struct scic_sds_controller *scic_controller_alloc(struct device *dev); 129 - int scic_controller_mem_init(struct scic_sds_controller *scic); 130 - #endif /* _SCIC_CONTROLLER_H_ */
-2973
drivers/scsi/isci/core/scic_sds_controller.c
··· 1 - /* 2 - * This file is provided under a dual BSD/GPLv2 license. When using or 3 - * redistributing this file, you may do so under either license. 4 - * 5 - * GPL LICENSE SUMMARY 6 - * 7 - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 8 - * 9 - * This program is free software; you can redistribute it and/or modify 10 - * it under the terms of version 2 of the GNU General Public License as 11 - * published by the Free Software Foundation. 12 - * 13 - * This program is distributed in the hope that it will be useful, but 14 - * WITHOUT ANY WARRANTY; without even the implied warranty of 15 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 - * General Public License for more details. 17 - * 18 - * You should have received a copy of the GNU General Public License 19 - * along with this program; if not, write to the Free Software 20 - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 21 - * The full GNU General Public License is included in this distribution 22 - * in the file called LICENSE.GPL. 23 - * 24 - * BSD LICENSE 25 - * 26 - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 27 - * All rights reserved. 28 - * 29 - * Redistribution and use in source and binary forms, with or without 30 - * modification, are permitted provided that the following conditions 31 - * are met: 32 - * 33 - * * Redistributions of source code must retain the above copyright 34 - * notice, this list of conditions and the following disclaimer. 35 - * * Redistributions in binary form must reproduce the above copyright 36 - * notice, this list of conditions and the following disclaimer in 37 - * the documentation and/or other materials provided with the 38 - * distribution. 39 - * * Neither the name of Intel Corporation nor the names of its 40 - * contributors may be used to endorse or promote products derived 41 - * from this software without specific prior written permission. 
42 - * 43 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 44 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 45 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 46 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 47 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 48 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 49 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 50 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 51 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 52 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 53 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 - */ 55 - 56 - #include <linux/device.h> 57 - #include <scsi/sas.h> 58 - #include "scic_controller.h" 59 - #include "scic_phy.h" 60 - #include "scic_port.h" 61 - #include "scic_sds_controller.h" 62 - #include "scu_registers.h" 63 - #include "scic_sds_phy.h" 64 - #include "scic_sds_port_configuration_agent.h" 65 - #include "scic_sds_port.h" 66 - #include "remote_device.h" 67 - #include "scic_sds_request.h" 68 - #include "sci_environment.h" 69 - #include "sci_util.h" 70 - #include "scu_completion_codes.h" 71 - #include "scu_event_codes.h" 72 - #include "scu_remote_node_context.h" 73 - #include "scu_task_context.h" 74 - #include "scu_unsolicited_frame.h" 75 - #include "timers.h" 76 - 77 - #define SCU_CONTEXT_RAM_INIT_STALL_TIME 200 78 - 79 - /** 80 - * smu_dcc_get_max_ports() - 81 - * 82 - * This macro returns the maximum number of logical ports supported by the 83 - * hardware. The caller passes in the value read from the device context 84 - * capacity register and this macro will mash and shift the value appropriately. 
85 - */ 86 - #define smu_dcc_get_max_ports(dcc_value) \ 87 - (\ 88 - (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \ 89 - >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \ 90 - ) 91 - 92 - /** 93 - * smu_dcc_get_max_task_context() - 94 - * 95 - * This macro returns the maximum number of task contexts supported by the 96 - * hardware. The caller passes in the value read from the device context 97 - * capacity register and this macro will mash and shift the value appropriately. 98 - */ 99 - #define smu_dcc_get_max_task_context(dcc_value) \ 100 - (\ 101 - (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \ 102 - >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \ 103 - ) 104 - 105 - /** 106 - * smu_dcc_get_max_remote_node_context() - 107 - * 108 - * This macro returns the maximum number of remote node contexts supported by 109 - * the hardware. The caller passes in the value read from the device context 110 - * capacity register and this macro will mash and shift the value appropriately. 111 - */ 112 - #define smu_dcc_get_max_remote_node_context(dcc_value) \ 113 - (\ 114 - (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \ 115 - >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \ 116 - ) 117 - 118 - 119 - static void scic_sds_controller_power_control_timer_handler( 120 - void *controller); 121 - #define SCIC_SDS_CONTROLLER_MIN_TIMER_COUNT 3 122 - #define SCIC_SDS_CONTROLLER_MAX_TIMER_COUNT 3 123 - 124 - /** 125 - * 126 - * 127 - * The number of milliseconds to wait for a phy to start. 128 - */ 129 - #define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100 130 - 131 - /** 132 - * 133 - * 134 - * The number of milliseconds to wait while a given phy is consuming power 135 - * before allowing another set of phys to consume power. Ultimately, this will 136 - * be specified by OEM parameter. 
137 - */ 138 - #define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500 139 - 140 - /** 141 - * COMPLETION_QUEUE_CYCLE_BIT() - 142 - * 143 - * This macro will return the cycle bit of the completion queue entry 144 - */ 145 - #define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000) 146 - 147 - /** 148 - * NORMALIZE_GET_POINTER() - 149 - * 150 - * This macro will normalize the completion queue get pointer so its value can 151 - * be used as an index into an array 152 - */ 153 - #define NORMALIZE_GET_POINTER(x) \ 154 - ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK) 155 - 156 - /** 157 - * NORMALIZE_PUT_POINTER() - 158 - * 159 - * This macro will normalize the completion queue put pointer so its value can 160 - * be used as an array inde 161 - */ 162 - #define NORMALIZE_PUT_POINTER(x) \ 163 - ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK) 164 - 165 - 166 - /** 167 - * NORMALIZE_GET_POINTER_CYCLE_BIT() - 168 - * 169 - * This macro will normalize the completion queue cycle pointer so it matches 170 - * the completion queue cycle bit 171 - */ 172 - #define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \ 173 - ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT)) 174 - 175 - /** 176 - * NORMALIZE_EVENT_POINTER() - 177 - * 178 - * This macro will normalize the completion queue event entry so its value can 179 - * be used as an index. 180 - */ 181 - #define NORMALIZE_EVENT_POINTER(x) \ 182 - (\ 183 - ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \ 184 - >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \ 185 - ) 186 - 187 - /** 188 - * INCREMENT_COMPLETION_QUEUE_GET() - 189 - * 190 - * This macro will increment the controllers completion queue index value and 191 - * possibly toggle the cycle bit if the completion queue index wraps back to 0. 
192 - */ 193 - #define INCREMENT_COMPLETION_QUEUE_GET(controller, index, cycle) \ 194 - INCREMENT_QUEUE_GET(\ 195 - (index), \ 196 - (cycle), \ 197 - (controller)->completion_queue_entries, \ 198 - SMU_CQGR_CYCLE_BIT \ 199 - ) 200 - 201 - /** 202 - * INCREMENT_EVENT_QUEUE_GET() - 203 - * 204 - * This macro will increment the controllers event queue index value and 205 - * possibly toggle the event cycle bit if the event queue index wraps back to 0. 206 - */ 207 - #define INCREMENT_EVENT_QUEUE_GET(controller, index, cycle) \ 208 - INCREMENT_QUEUE_GET(\ 209 - (index), \ 210 - (cycle), \ 211 - (controller)->completion_event_entries, \ 212 - SMU_CQGR_EVENT_CYCLE_BIT \ 213 - ) 214 - 215 - static void scic_sds_controller_initialize_power_control(struct scic_sds_controller *scic) 216 - { 217 - struct isci_host *ihost = scic_to_ihost(scic); 218 - scic->power_control.timer = isci_timer_create(ihost, 219 - scic, 220 - scic_sds_controller_power_control_timer_handler); 221 - 222 - memset(scic->power_control.requesters, 0, 223 - sizeof(scic->power_control.requesters)); 224 - 225 - scic->power_control.phys_waiting = 0; 226 - scic->power_control.phys_granted_power = 0; 227 - } 228 - 229 - int scic_controller_mem_init(struct scic_sds_controller *scic) 230 - { 231 - struct device *dev = scic_to_dev(scic); 232 - dma_addr_t dma_handle; 233 - enum sci_status result; 234 - 235 - scic->completion_queue = dmam_alloc_coherent(dev, 236 - scic->completion_queue_entries * sizeof(u32), 237 - &dma_handle, GFP_KERNEL); 238 - if (!scic->completion_queue) 239 - return -ENOMEM; 240 - 241 - writel(lower_32_bits(dma_handle), 242 - &scic->smu_registers->completion_queue_lower); 243 - writel(upper_32_bits(dma_handle), 244 - &scic->smu_registers->completion_queue_upper); 245 - 246 - scic->remote_node_context_table = dmam_alloc_coherent(dev, 247 - scic->remote_node_entries * 248 - sizeof(union scu_remote_node_context), 249 - &dma_handle, GFP_KERNEL); 250 - if (!scic->remote_node_context_table) 251 - 
return -ENOMEM; 252 - 253 - writel(lower_32_bits(dma_handle), 254 - &scic->smu_registers->remote_node_context_lower); 255 - writel(upper_32_bits(dma_handle), 256 - &scic->smu_registers->remote_node_context_upper); 257 - 258 - scic->task_context_table = dmam_alloc_coherent(dev, 259 - scic->task_context_entries * 260 - sizeof(struct scu_task_context), 261 - &dma_handle, GFP_KERNEL); 262 - if (!scic->task_context_table) 263 - return -ENOMEM; 264 - 265 - writel(lower_32_bits(dma_handle), 266 - &scic->smu_registers->host_task_table_lower); 267 - writel(upper_32_bits(dma_handle), 268 - &scic->smu_registers->host_task_table_upper); 269 - 270 - result = scic_sds_unsolicited_frame_control_construct(scic); 271 - if (result) 272 - return result; 273 - 274 - /* 275 - * Inform the silicon as to the location of the UF headers and 276 - * address table. 277 - */ 278 - writel(lower_32_bits(scic->uf_control.headers.physical_address), 279 - &scic->scu_registers->sdma.uf_header_base_address_lower); 280 - writel(upper_32_bits(scic->uf_control.headers.physical_address), 281 - &scic->scu_registers->sdma.uf_header_base_address_upper); 282 - 283 - writel(lower_32_bits(scic->uf_control.address_table.physical_address), 284 - &scic->scu_registers->sdma.uf_address_table_lower); 285 - writel(upper_32_bits(scic->uf_control.address_table.physical_address), 286 - &scic->scu_registers->sdma.uf_address_table_upper); 287 - 288 - return 0; 289 - } 290 - 291 - /** 292 - * This method initializes the task context data for the controller. 293 - * @scic: 294 - * 295 - */ 296 - static void 297 - scic_sds_controller_assign_task_entries(struct scic_sds_controller *controller) 298 - { 299 - u32 task_assignment; 300 - 301 - /* 302 - * Assign all the TCs to function 0 303 - * TODO: Do we actually need to read this register to write it back? 
304 - */ 305 - 306 - task_assignment = 307 - readl(&controller->smu_registers->task_context_assignment[0]); 308 - 309 - task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) | 310 - (SMU_TCA_GEN_VAL(ENDING, controller->task_context_entries - 1)) | 311 - (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE)); 312 - 313 - writel(task_assignment, 314 - &controller->smu_registers->task_context_assignment[0]); 315 - 316 - } 317 - 318 - /** 319 - * This method initializes the hardware completion queue. 320 - * 321 - * 322 - */ 323 - static void scic_sds_controller_initialize_completion_queue( 324 - struct scic_sds_controller *scic) 325 - { 326 - u32 index; 327 - u32 completion_queue_control_value; 328 - u32 completion_queue_get_value; 329 - u32 completion_queue_put_value; 330 - 331 - scic->completion_queue_get = 0; 332 - 333 - completion_queue_control_value = ( 334 - SMU_CQC_QUEUE_LIMIT_SET(scic->completion_queue_entries - 1) 335 - | SMU_CQC_EVENT_LIMIT_SET(scic->completion_event_entries - 1) 336 - ); 337 - 338 - writel(completion_queue_control_value, 339 - &scic->smu_registers->completion_queue_control); 340 - 341 - 342 - /* Set the completion queue get pointer and enable the queue */ 343 - completion_queue_get_value = ( 344 - (SMU_CQGR_GEN_VAL(POINTER, 0)) 345 - | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0)) 346 - | (SMU_CQGR_GEN_BIT(ENABLE)) 347 - | (SMU_CQGR_GEN_BIT(EVENT_ENABLE)) 348 - ); 349 - 350 - writel(completion_queue_get_value, 351 - &scic->smu_registers->completion_queue_get); 352 - 353 - /* Set the completion queue put pointer */ 354 - completion_queue_put_value = ( 355 - (SMU_CQPR_GEN_VAL(POINTER, 0)) 356 - | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0)) 357 - ); 358 - 359 - writel(completion_queue_put_value, 360 - &scic->smu_registers->completion_queue_put); 361 - 362 - /* Initialize the cycle bit of the completion queue entries */ 363 - for (index = 0; index < scic->completion_queue_entries; index++) { 364 - /* 365 - * If get.cycle_bit != completion_queue.cycle_bit 366 - * its not a valid 
completion queue entry 367 - * so at system start all entries are invalid */ 368 - scic->completion_queue[index] = 0x80000000; 369 - } 370 - } 371 - 372 - /** 373 - * This method initializes the hardware unsolicited frame queue. 374 - * 375 - * 376 - */ 377 - static void scic_sds_controller_initialize_unsolicited_frame_queue( 378 - struct scic_sds_controller *scic) 379 - { 380 - u32 frame_queue_control_value; 381 - u32 frame_queue_get_value; 382 - u32 frame_queue_put_value; 383 - 384 - /* Write the queue size */ 385 - frame_queue_control_value = 386 - SCU_UFQC_GEN_VAL(QUEUE_SIZE, 387 - scic->uf_control.address_table.count); 388 - 389 - writel(frame_queue_control_value, 390 - &scic->scu_registers->sdma.unsolicited_frame_queue_control); 391 - 392 - /* Setup the get pointer for the unsolicited frame queue */ 393 - frame_queue_get_value = ( 394 - SCU_UFQGP_GEN_VAL(POINTER, 0) 395 - | SCU_UFQGP_GEN_BIT(ENABLE_BIT) 396 - ); 397 - 398 - writel(frame_queue_get_value, 399 - &scic->scu_registers->sdma.unsolicited_frame_get_pointer); 400 - /* Setup the put pointer for the unsolicited frame queue */ 401 - frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0); 402 - writel(frame_queue_put_value, 403 - &scic->scu_registers->sdma.unsolicited_frame_put_pointer); 404 - } 405 - 406 - /** 407 - * This method enables the hardware port task scheduler. 408 - * 409 - * 410 - */ 411 - static void scic_sds_controller_enable_port_task_scheduler( 412 - struct scic_sds_controller *scic) 413 - { 414 - u32 port_task_scheduler_value; 415 - 416 - port_task_scheduler_value = 417 - readl(&scic->scu_registers->peg0.ptsg.control); 418 - port_task_scheduler_value |= 419 - (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) | 420 - SCU_PTSGCR_GEN_BIT(PTSG_ENABLE)); 421 - writel(port_task_scheduler_value, 422 - &scic->scu_registers->peg0.ptsg.control); 423 - } 424 - 425 - /** 426 - * 427 - * 428 - * This macro is used to delay between writes to the AFE registers during AFE 429 - * initialization. 
430 - */ 431 - #define AFE_REGISTER_WRITE_DELAY 10 432 - 433 - /* Initialize the AFE for this phy index. We need to read the AFE setup from 434 - * the OEM parameters none 435 - */ 436 - static void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic) 437 - { 438 - const struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1; 439 - u32 afe_status; 440 - u32 phy_id; 441 - 442 - /* Clear DFX Status registers */ 443 - writel(0x0081000f, &scic->scu_registers->afe.afe_dfx_master_control0); 444 - udelay(AFE_REGISTER_WRITE_DELAY); 445 - 446 - if (is_b0()) { 447 - /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement 448 - * Timer, PM Stagger Timer */ 449 - writel(0x0007BFFF, &scic->scu_registers->afe.afe_pmsn_master_control2); 450 - udelay(AFE_REGISTER_WRITE_DELAY); 451 - } 452 - 453 - /* Configure bias currents to normal */ 454 - if (is_a0()) 455 - writel(0x00005500, &scic->scu_registers->afe.afe_bias_control); 456 - else if (is_a2()) 457 - writel(0x00005A00, &scic->scu_registers->afe.afe_bias_control); 458 - else if (is_b0()) 459 - writel(0x00005F00, &scic->scu_registers->afe.afe_bias_control); 460 - 461 - udelay(AFE_REGISTER_WRITE_DELAY); 462 - 463 - /* Enable PLL */ 464 - if (is_b0()) 465 - writel(0x80040A08, &scic->scu_registers->afe.afe_pll_control0); 466 - else 467 - writel(0x80040908, &scic->scu_registers->afe.afe_pll_control0); 468 - 469 - udelay(AFE_REGISTER_WRITE_DELAY); 470 - 471 - /* Wait for the PLL to lock */ 472 - do { 473 - afe_status = readl(&scic->scu_registers->afe.afe_common_block_status); 474 - udelay(AFE_REGISTER_WRITE_DELAY); 475 - } while ((afe_status & 0x00001000) == 0); 476 - 477 - if (is_a0() || is_a2()) { 478 - /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */ 479 - writel(0x7bcc96ad, &scic->scu_registers->afe.afe_pmsn_master_control0); 480 - udelay(AFE_REGISTER_WRITE_DELAY); 481 - } 482 - 483 - for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) { 484 - const struct sci_phy_oem_params *oem_phy = 
&oem->phys[phy_id]; 485 - 486 - if (is_b0()) { 487 - /* Configure transmitter SSC parameters */ 488 - writel(0x00030000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); 489 - udelay(AFE_REGISTER_WRITE_DELAY); 490 - } else { 491 - /* 492 - * All defaults, except the Receive Word Alignament/Comma Detect 493 - * Enable....(0xe800) */ 494 - writel(0x00004512, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); 495 - udelay(AFE_REGISTER_WRITE_DELAY); 496 - 497 - writel(0x0050100F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1); 498 - udelay(AFE_REGISTER_WRITE_DELAY); 499 - } 500 - 501 - /* 502 - * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) 503 - * & increase TX int & ext bias 20%....(0xe85c) */ 504 - if (is_a0()) 505 - writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); 506 - else if (is_a2()) 507 - writel(0x000003F0, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); 508 - else { 509 - /* Power down TX and RX (PWRDNTX and PWRDNRX) */ 510 - writel(0x000003d7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); 511 - udelay(AFE_REGISTER_WRITE_DELAY); 512 - 513 - /* 514 - * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) 515 - * & increase TX int & ext bias 20%....(0xe85c) */ 516 - writel(0x000003d4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); 517 - } 518 - udelay(AFE_REGISTER_WRITE_DELAY); 519 - 520 - if (is_a0() || is_a2()) { 521 - /* Enable TX equalization (0xe824) */ 522 - writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); 523 - udelay(AFE_REGISTER_WRITE_DELAY); 524 - } 525 - 526 - /* 527 - * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On), 528 - * RDD=0x0(RX Detect Enabled) ....(0xe800) */ 529 - writel(0x00004100, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); 530 - udelay(AFE_REGISTER_WRITE_DELAY); 531 - 532 - /* Leave 
DFE/FFE on */ 533 - if (is_a0()) 534 - writel(0x3F09983F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); 535 - else if (is_a2()) 536 - writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); 537 - else { 538 - writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); 539 - udelay(AFE_REGISTER_WRITE_DELAY); 540 - /* Enable TX equalization (0xe824) */ 541 - writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); 542 - } 543 - udelay(AFE_REGISTER_WRITE_DELAY); 544 - 545 - writel(oem_phy->afe_tx_amp_control0, 546 - &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0); 547 - udelay(AFE_REGISTER_WRITE_DELAY); 548 - 549 - writel(oem_phy->afe_tx_amp_control1, 550 - &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1); 551 - udelay(AFE_REGISTER_WRITE_DELAY); 552 - 553 - writel(oem_phy->afe_tx_amp_control2, 554 - &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2); 555 - udelay(AFE_REGISTER_WRITE_DELAY); 556 - 557 - writel(oem_phy->afe_tx_amp_control3, 558 - &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3); 559 - udelay(AFE_REGISTER_WRITE_DELAY); 560 - } 561 - 562 - /* Transfer control to the PEs */ 563 - writel(0x00010f00, &scic->scu_registers->afe.afe_dfx_master_control0); 564 - udelay(AFE_REGISTER_WRITE_DELAY); 565 - } 566 - 567 - /* 568 - * ****************************************************************************- 569 - * * SCIC SDS Controller Internal Start/Stop Routines 570 - * ****************************************************************************- */ 571 - 572 - 573 - /** 574 - * This method will attempt to transition into the ready state for the 575 - * controller and indicate that the controller start operation has completed 576 - * if all criteria are met. 577 - * @scic: This parameter indicates the controller object for which 578 - * to transition to ready. 
 * @status: This parameter indicates the status value to be passed into the call
 *    to scic_cb_controller_start_complete().
 *
 * none.
 */
static void scic_sds_controller_transition_to_ready(
	struct scic_sds_controller *scic,
	enum sci_status status)
{
	struct isci_host *ihost = scic_to_ihost(scic);

	if (scic->state_machine.current_state_id ==
	    SCI_BASE_CONTROLLER_STATE_STARTING) {
		/*
		 * We move into the ready state, because some of the phys/ports
		 * may be up and operational.
		 */
		sci_base_state_machine_change_state(&scic->state_machine,
						    SCI_BASE_CONTROLLER_STATE_READY);

		isci_host_start_complete(ihost, status);
	}
}

/*
 * Controller watchdog timer callback: a timeout while STARTING still moves
 * the controller to READY (with SCI_FAILURE_TIMEOUT); a timeout while
 * STOPPING fails the controller and completes the stop request.
 */
static void scic_sds_controller_timeout_handler(void *_scic)
{
	struct scic_sds_controller *scic = _scic;
	struct isci_host *ihost = scic_to_ihost(scic);
	struct sci_base_state_machine *sm = &scic->state_machine;

	if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STARTING)
		scic_sds_controller_transition_to_ready(scic, SCI_FAILURE_TIMEOUT);
	else if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STOPPING) {
		sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_FAILED);
		isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
	} else	/* @todo Now what do we want to do in this case? */
		dev_err(scic_to_dev(scic),
			"%s: Controller timer fired when controller was not "
			"in a state being timed.\n",
			__func__);
}

/*
 * Invoke the stop handler of every configured logical port.  A port that is
 * already stopped (SCI_FAILURE_INVALID_STATE) is not treated as an error;
 * any other failure is logged and the aggregate result becomes SCI_FAILURE.
 */
static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic)
{
	u32 index;
	enum sci_status port_status;
	enum sci_status status = SCI_SUCCESS;
	struct isci_host *ihost = scic_to_ihost(scic);

	for (index = 0; index < scic->logical_port_entries; index++) {
		struct scic_sds_port *sci_port = &ihost->ports[index].sci;
		scic_sds_port_handler_t stop;

		stop = sci_port->state_handlers->stop_handler;
		port_status = stop(sci_port);

		if ((port_status != SCI_SUCCESS) &&
		    (port_status != SCI_FAILURE_INVALID_STATE)) {
			status = SCI_FAILURE;

			dev_warn(scic_to_dev(scic),
				 "%s: Controller stop operation failed to "
				 "stop port %d because of status %d.\n",
				 __func__,
				 sci_port->logical_port_index,
				 port_status);
		}
	}

	return status;
}

/* Arm the phy-startup timeout and record that it is pending. */
static inline void scic_sds_controller_phy_timer_start(
	struct scic_sds_controller *scic)
{
	isci_timer_start(scic->phy_startup_timer,
			 SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);

	scic->phy_startup_timer_pending = true;
}

/* Cancel the phy-startup timeout and clear the pending flag. */
static void scic_sds_controller_phy_timer_stop(struct scic_sds_controller *scic)
{
	isci_timer_stop(scic->phy_startup_timer);

	scic->phy_startup_timer_pending = false;
}

/**
 * scic_sds_controller_start_next_phy - start phy
 * @scic: controller
 *
 * If all the phys have been started, then attempt to transition the
 * controller to the READY state and inform the user
 * (scic_cb_controller_start_complete()).
 */
static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *scic)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
	struct scic_sds_phy *sci_phy;
	enum sci_status status;

	status = SCI_SUCCESS;

	/* A phy start is already in flight; wait for its timer to resolve. */
	if (scic->phy_startup_timer_pending)
		return status;

	if (scic->next_phy_to_start >= SCI_MAX_PHYS) {
		bool is_controller_start_complete = true;
		u32 state;
		u8 index;

		for (index = 0; index < SCI_MAX_PHYS; index++) {
			sci_phy = &ihost->phys[index].sci;
			state = sci_phy->state_machine.current_state_id;

			/* Phys not assigned to a port do not gate completion. */
			if (!scic_sds_phy_get_port(sci_phy))
				continue;

			/* The controller start operation is complete iff:
			 * - all links have been given an opportunity to start
			 * - have no indication of a connected device
			 * - have an indication of a connected device and it has
			 *   finished the link training process.
			 */
			if ((sci_phy->is_in_link_training == false &&
			     state == SCI_BASE_PHY_STATE_INITIAL) ||
			    (sci_phy->is_in_link_training == false &&
			     state == SCI_BASE_PHY_STATE_STOPPED) ||
			    (sci_phy->is_in_link_training == true &&
			     state == SCI_BASE_PHY_STATE_STARTING)) {
				is_controller_start_complete = false;
				break;
			}
		}

		/*
		 * The controller has successfully finished the start process.
		 * Inform the SCI Core user and transition to the READY state.
		 */
		if (is_controller_start_complete == true) {
			scic_sds_controller_transition_to_ready(scic, SCI_SUCCESS);
			scic_sds_controller_phy_timer_stop(scic);
		}
	} else {
		sci_phy = &ihost->phys[scic->next_phy_to_start].sci;

		if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
			if (scic_sds_phy_get_port(sci_phy) == NULL) {
				scic->next_phy_to_start++;

				/* Caution: recursion ahead, be forewarned.
				 *
				 * The PHY was never added to a PORT in MPC mode
				 * so start the next phy in sequence.  This phy
				 * will never go link up and will not draw power;
				 * the OEM parameters either configured the phy
				 * incorrectly for the PORT or it was never
				 * assigned to a PORT.
				 */
				return scic_sds_controller_start_next_phy(scic);
			}
		}

		status = scic_sds_phy_start(sci_phy);

		if (status == SCI_SUCCESS) {
			scic_sds_controller_phy_timer_start(scic);
		} else {
			dev_warn(scic_to_dev(scic),
				 "%s: Controller stop operation failed "
				 "to stop phy %d because of status "
				 "%d.\n",
				 __func__,
				 ihost->phys[scic->next_phy_to_start].sci.phy_index,
				 status);
		}

		scic->next_phy_to_start++;
	}

	return status;
}

/*
 * Phy-startup timeout: clear the pending flag and retry starting the next
 * phy until a start attempt succeeds (start_next_phy returns SCI_SUCCESS
 * immediately once all phys have been given a chance).
 */
static void scic_sds_controller_phy_startup_timeout_handler(void *_scic)
{
	struct scic_sds_controller *scic = _scic;
	enum sci_status status;

	scic->phy_startup_timer_pending = false;
	status = SCI_FAILURE;
	while (status != SCI_SUCCESS)
		status = scic_sds_controller_start_next_phy(scic);
}

/*
 * Allocate the phy-startup timer and reset the phy-startup bookkeeping.
 * Returns SCI_FAILURE_INSUFFICIENT_RESOURCES if the timer cannot be created.
 */
static enum sci_status scic_sds_controller_initialize_phy_startup(struct scic_sds_controller *scic)
{
	struct isci_host *ihost = scic_to_ihost(scic);

	scic->phy_startup_timer = isci_timer_create(ihost,
						    scic,
						    scic_sds_controller_phy_startup_timeout_handler);

	if (scic->phy_startup_timer == NULL)
		return SCI_FAILURE_INSUFFICIENT_RESOURCES;
	else {
		scic->next_phy_to_start = 0;
		scic->phy_startup_timer_pending = false;
	}

	return SCI_SUCCESS;
}

/*
 * Stop every phy on the controller.  As with port stop, an already-stopped
 * phy (SCI_FAILURE_INVALID_STATE) is tolerated; other failures are logged
 * and collapse the aggregate result to SCI_FAILURE.
 */
static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller *scic)
{
	u32 index;
	enum sci_status status;
	enum sci_status phy_status;
	struct isci_host *ihost = scic_to_ihost(scic);

	status = SCI_SUCCESS;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		phy_status = scic_sds_phy_stop(&ihost->phys[index].sci);

		if (phy_status != SCI_SUCCESS &&
		    phy_status != SCI_FAILURE_INVALID_STATE) {
			status = SCI_FAILURE;

			dev_warn(scic_to_dev(scic),
				 "%s: Controller stop operation failed to stop "
				 "phy %d because of status %d.\n",
				 __func__,
				 ihost->phys[index].sci.phy_index, phy_status);
		}
	}

	return status;
}

/*
 * Request a stop of every remote device known to the controller.  Note that
 * failures here are only logged; the returned status is never downgraded
 * from SCI_SUCCESS by this routine as written.
 */
static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controller *scic)
{
	u32 index;
	enum sci_status status;
	enum sci_status device_status;

	status = SCI_SUCCESS;

	for (index = 0; index < scic->remote_node_entries; index++) {
		if (scic->device_table[index] != NULL) {
			/* @todo What timeout value do we want to provide to this request? */
			device_status = scic_remote_device_stop(scic->device_table[index], 0);

			if ((device_status != SCI_SUCCESS) &&
			    (device_status != SCI_FAILURE_INVALID_STATE)) {
				dev_warn(scic_to_dev(scic),
					 "%s: Controller stop operation failed "
					 "to stop device 0x%p because of "
					 "status %d.\n",
					 __func__,
					 scic->device_table[index], device_status);
			}
		}
	}

	return status;
}

/* Arm the staggered-spinup power-control interval timer. */
static void scic_sds_controller_power_control_timer_start(struct scic_sds_controller *scic)
{
	isci_timer_start(scic->power_control.timer,
			 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);

	scic->power_control.timer_started = true;
}

/* Cancel the power-control timer if it is currently running. */
static void scic_sds_controller_power_control_timer_stop(struct scic_sds_controller *scic)
{
	if (scic->power_control.timer_started) {
		isci_timer_stop(scic->power_control.timer);
		scic->power_control.timer_started = false;
	}
}

/* Restart the power-control interval from now (stop then start). */
static void scic_sds_controller_power_control_timer_restart(struct scic_sds_controller *scic)
{
	scic_sds_controller_power_control_timer_stop(scic);
	scic_sds_controller_power_control_timer_start(scic);
}

/*
 * Power-control interval expiry: a fresh spin-up budget becomes available,
 * so grant power to as many waiting phys as max_concurrent_dev_spin_up
 * allows, in phy-index order.
 */
static void scic_sds_controller_power_control_timer_handler(
	void *controller)
{
	struct scic_sds_controller *scic;

	scic = (struct scic_sds_controller *)controller;

	/* The previous interval's grants have expired. */
	scic->power_control.phys_granted_power = 0;

	if (scic->power_control.phys_waiting == 0) {
		scic->power_control.timer_started = false;
	} else {
		struct scic_sds_phy *sci_phy = NULL;
		u8 i;

		for (i = 0;
		     (i < SCI_MAX_PHYS)
		     && (scic->power_control.phys_waiting != 0);
		     i++) {
			if (scic->power_control.requesters[i] != NULL) {
				if (scic->power_control.phys_granted_power <
				    scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
					sci_phy = scic->power_control.requesters[i];
					scic->power_control.requesters[i] = NULL;
					scic->power_control.phys_waiting--;
					scic->power_control.phys_granted_power++;
					scic_sds_phy_consume_power_handler(sci_phy);
				} else {
					break;
				}
			}
		}

		/*
		 * It doesn't matter if the power list is empty, we need to start the
		 * timer in case another phy becomes ready.
		 */
		scic_sds_controller_power_control_timer_start(scic);
	}
}

/**
 * This method inserts the phy in the stagger spinup control queue.  If spin-up
 * budget remains in the current interval the phy is granted power immediately;
 * otherwise it is parked in the requesters table until the timer fires.
 * @scic:
 *
 *
 */
void scic_sds_controller_power_control_queue_insert(
	struct scic_sds_controller *scic,
	struct scic_sds_phy *sci_phy)
{
	BUG_ON(sci_phy == NULL);

	if (scic->power_control.phys_granted_power <
	    scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
		scic->power_control.phys_granted_power++;
		scic_sds_phy_consume_power_handler(sci_phy);

		/*
		 * stop and start the power_control timer. When the timer fires, the
		 * no_of_phys_granted_power will be set to 0
		 */
		scic_sds_controller_power_control_timer_restart(scic);
	} else {
		/* Add the phy in the waiting list */
		scic->power_control.requesters[sci_phy->phy_index] = sci_phy;
		scic->power_control.phys_waiting++;
	}
}

/**
 * This method removes the phy from the stagger spinup control queue.
 * @scic:
 *
 *
 */
void scic_sds_controller_power_control_queue_remove(
	struct scic_sds_controller *scic,
	struct scic_sds_phy *sci_phy)
{
	BUG_ON(sci_phy == NULL);

	if (scic->power_control.requesters[sci_phy->phy_index] != NULL) {
		scic->power_control.phys_waiting--;
	}

	scic->power_control.requesters[sci_phy->phy_index] = NULL;
}

/*
 * ****************************************************************************-
 * * SCIC SDS Controller Completion Routines
 * ****************************************************************************- */

/**
 * This method returns a true value if the completion queue has entries that
 * can be processed
 * @scic:
 *
 * bool true if the completion queue has entries to process false if the
 * completion queue has no entries to process
 */
static bool scic_sds_controller_completion_queue_has_entries(
	struct scic_sds_controller *scic)
{
	u32 get_value = scic->completion_queue_get;
	u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;

	/* An entry is valid when its cycle bit matches the get pointer's. */
	if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
	    COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index]))
		return true;

	return false;
}

/**
 * This method processes a task completion notification.  This is called from
 * within the controller completion handler.
 * @scic:
 * @completion_entry:
 *
 */
static void scic_sds_controller_task_completion(
	struct scic_sds_controller *scic,
	u32 completion_entry)
{
	u32 index;
	struct scic_sds_request *io_request;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);
	io_request = scic->io_request_table[index];

	/* Make sure that we really want to process this IO request */
	if (
		(io_request != NULL)
		&& (io_request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG)
		&& (
			scic_sds_io_tag_get_sequence(io_request->io_tag)
			== scic->io_request_sequence[index]
			)
		) {
		/* Yep this is a valid io request pass it along to the io request handler */
		scic_sds_io_request_tc_completion(io_request, completion_entry);
	}
}

/**
 * This method processes an SDMA completion event.  This is called from within
 * the controller completion handler.  All SDMA completions are currently
 * only logged; see the @todo notes below.
 * @scic:
 * @completion_entry:
 *
 */
static void scic_sds_controller_sdma_completion(
	struct scic_sds_controller *scic,
	u32 completion_entry)
{
	u32 index;
	struct scic_sds_request *io_request;
	struct scic_sds_remote_device *device;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);

	switch (scu_get_command_request_type(completion_entry)) {
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
		io_request = scic->io_request_table[index];
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion type SDMA %x for io request "
			 "%p\n",
			 __func__,
			 completion_entry,
			 io_request);
		/* @todo For a post TC operation we need to fail the IO
		 * request
		 */
		break;

	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
		device = scic->device_table[index];
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion type SDMA %x for remote "
			 "device %p\n",
			 __func__,
			 completion_entry,
			 device);
		/* @todo For a port RNC operation we need to fail the
		 * device
		 */
		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion unknown SDMA completion "
			 "type %x\n",
			 __func__,
			 completion_entry);
		break;

	}
}

/*
 * Dispatch an unsolicited frame completion: address frames go to the phy,
 * data frames go to the owning remote device (or to the phy when the frame
 * arrived before the device object exists, e.g. a SATA signature FIS).
 * Frames with errors, or frames with no owner, are released back to the
 * hardware immediately.
 */
static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *scic,
						  u32 completion_entry)
{
	u32 index;
	u32 frame_index;

	struct isci_host *ihost = scic_to_ihost(scic);
	struct scu_unsolicited_frame_header *frame_header;
	struct scic_sds_phy *phy;
	struct scic_sds_remote_device *device;

	enum sci_status result = SCI_FAILURE;

	frame_index = SCU_GET_FRAME_INDEX(completion_entry);

	frame_header = scic->uf_control.buffers.array[frame_index].header;
	scic->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;

	if (SCU_GET_FRAME_ERROR(completion_entry)) {
		/*
		 * @todo If the IAF frame or SIGNATURE FIS frame has an error will
		 * this cause a problem?  We expect the phy initialization will
		 * fail if there is an error in the frame.
		 */
		scic_sds_controller_release_frame(scic, frame_index);
		return;
	}

	if (frame_header->is_address_frame) {
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
		phy = &ihost->phys[index].sci;
		result = scic_sds_phy_frame_handler(phy, frame_index);
	} else {

		index = SCU_GET_COMPLETION_INDEX(completion_entry);

		if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			/*
			 * This is a signature fis or a frame from a direct attached SATA
			 * device that has not yet been created.  In either case forward
			 * the frame to the PE and let it take care of the frame data. */
			index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
			phy = &ihost->phys[index].sci;
			result = scic_sds_phy_frame_handler(phy, frame_index);
		} else {
			if (index < scic->remote_node_entries)
				device = scic->device_table[index];
			else
				device = NULL;

			if (device != NULL)
				result = scic_sds_remote_device_frame_handler(device, frame_index);
			else
				scic_sds_controller_release_frame(scic, frame_index);
		}
	}

	if (result != SCI_SUCCESS) {
		/*
		 * @todo Is there any reason to report some additional error message
		 * when we get this failure notification? */
	}
}

/*
 * Dispatch an event completion to the object it concerns: fatal controller
 * events are logged, transport/scheduler events go to the io request or
 * remote device, and phy-scoped events (broadcast change, error counters,
 * OSSP) go to the phy.
 */
static void scic_sds_controller_event_completion(struct scic_sds_controller *scic,
						 u32 completion_entry)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_request *io_request;
	struct scic_sds_remote_device *device;
	struct scic_sds_phy *phy;
	u32 index;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);

	switch (scu_get_event_type(completion_entry)) {
	case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
		/* @todo The driver did something wrong and we need to fix the condition. */
		dev_err(scic_to_dev(scic),
			"%s: SCIC Controller 0x%p received SMU command error "
			"0x%x\n",
			__func__,
			scic,
			completion_entry);
		break;

	case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
	case SCU_EVENT_TYPE_SMU_ERROR:
	case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
		/*
		 * @todo This is a hardware failure and its likely that we want to
		 * reset the controller. */
		dev_err(scic_to_dev(scic),
			"%s: SCIC Controller 0x%p received fatal controller "
			"event 0x%x\n",
			__func__,
			scic,
			completion_entry);
		break;

	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
		io_request = scic->io_request_table[index];
		scic_sds_io_request_event_handler(io_request, completion_entry);
		break;

	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		switch (scu_get_event_specifier(completion_entry)) {
		case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
		case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
			io_request = scic->io_request_table[index];
			if (io_request != NULL)
				scic_sds_io_request_event_handler(io_request, completion_entry);
			else
				dev_warn(scic_to_dev(scic),
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for io request object "
					 "that doesnt exist.\n",
					 __func__,
					 scic,
					 completion_entry);

			break;

		case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
			device = scic->device_table[index];
			if (device != NULL)
				scic_sds_remote_device_event_handler(device, completion_entry);
			else
				dev_warn(scic_to_dev(scic),
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for remote device object "
					 "that doesnt exist.\n",
					 __func__,
					 scic,
					 completion_entry);

			break;
		}
		break;

	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
		/*
		 * direct the broadcast change event to the phy first and then let
		 * the phy redirect the broadcast change to the port object */
	case SCU_EVENT_TYPE_ERR_CNT_EVENT:
		/*
		 * direct error counter event to the phy object since that is where
		 * we get the event notification.  This is a type 4 event. */
	case SCU_EVENT_TYPE_OSSP_EVENT:
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
		phy = &ihost->phys[index].sci;
		scic_sds_phy_event_handler(phy, completion_entry);
		break;

	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
		if (index < scic->remote_node_entries) {
			device = scic->device_table[index];

			if (device != NULL)
				scic_sds_remote_device_event_handler(device, completion_entry);
		} else
			dev_err(scic_to_dev(scic),
				"%s: SCIC Controller 0x%p received event 0x%x "
				"for remote device object 0x%0x that doesnt "
				"exist.\n",
				__func__,
				scic,
				completion_entry,
				index);

		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC Controller received unknown event code %x\n",
			 __func__,
			 completion_entry);
		break;
	}
}

/**
 * This method is a private routine for processing the completion queue entries.
 * It drains every valid entry (tracked via the cycle bits), dispatches each
 * entry by completion type, then writes the updated get pointer back to the
 * SMU register if anything was consumed.
 * @scic:
 *
 */
static void scic_sds_controller_process_completions(
	struct scic_sds_controller *scic)
{
	u32 completion_count = 0;
	u32 completion_entry;
	u32 get_index;
	u32 get_cycle;
	u32 event_index;
	u32 event_cycle;

	dev_dbg(scic_to_dev(scic),
		"%s: completion queue begining get:0x%08x\n",
		__func__,
		scic->completion_queue_get);

	/* Get the component parts of the completion queue */
	get_index = NORMALIZE_GET_POINTER(scic->completion_queue_get);
	get_cycle = SMU_CQGR_CYCLE_BIT & scic->completion_queue_get;

	event_index = NORMALIZE_EVENT_POINTER(scic->completion_queue_get);
	event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & scic->completion_queue_get;

	while (
		NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
		== COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index])
		) {
		completion_count++;

		completion_entry = scic->completion_queue[get_index];
		INCREMENT_COMPLETION_QUEUE_GET(scic, get_index, get_cycle);

		dev_dbg(scic_to_dev(scic),
			"%s: completion queue entry:0x%08x\n",
			__func__,
			completion_entry);

		switch (SCU_GET_COMPLETION_TYPE(completion_entry)) {
		case SCU_COMPLETION_TYPE_TASK:
			scic_sds_controller_task_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_SDMA:
			scic_sds_controller_sdma_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_UFI:
			scic_sds_controller_unsolicited_frame(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_EVENT:
			INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
			scic_sds_controller_event_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_NOTIFY:
			/*
			 * Presently we do the same thing with a notify event that we do with the
			 * other event codes. */
			INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
			scic_sds_controller_event_completion(scic, completion_entry);
			break;

		default:
			dev_warn(scic_to_dev(scic),
				 "%s: SCIC Controller received unknown "
				 "completion type %x\n",
				 __func__,
				 completion_entry);
			break;
		}
	}

	/* Update the get register if we completed one or more entries */
	if (completion_count > 0) {
		scic->completion_queue_get =
			SMU_CQGR_GEN_BIT(ENABLE) |
			SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
			event_cycle |
			SMU_CQGR_GEN_VAL(EVENT_POINTER, event_index) |
			get_cycle |
			SMU_CQGR_GEN_VAL(POINTER, get_index);

		writel(scic->completion_queue_get,
		       &scic->smu_registers->completion_queue_get);

	}

	dev_dbg(scic_to_dev(scic),
		"%s: completion queue ending get:0x%08x\n",
		__func__,
		scic->completion_queue_get);

}

/*
 * Top-half interrupt check: returns true when there is completion work to
 * do.  For spurious interrupts, acknowledges and mask-toggles so a racing
 * hardware notification is not lost.
 */
bool scic_sds_controller_isr(struct scic_sds_controller *scic)
{
	if (scic_sds_controller_completion_queue_has_entries(scic)) {
		return true;
	} else {
		/*
		 * we have a spurious interrupt it could be that we have already
		 * emptied the completion queue from a previous interrupt */
		writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);

		/*
		 * There is a race in the hardware that could cause us not to be notified
		 * of an interrupt completion if we do not take this step.  We will mask
		 * then unmask the interrupts so if there is another interrupt pending
		 * the clearing of the interrupt source we get the next interrupt message.
		 */
		writel(0xFF000000, &scic->smu_registers->interrupt_mask);
		writel(0, &scic->smu_registers->interrupt_mask);
	}

	return false;
}

/*
 * Bottom-half completion handler: drain the completion queue, acknowledge
 * the completion interrupt, and re-enable interrupts via a mask toggle.
 */
void scic_sds_controller_completion_handler(struct scic_sds_controller *scic)
{
	/* Empty out the completion queue */
	if (scic_sds_controller_completion_queue_has_entries(scic))
		scic_sds_controller_process_completions(scic);

	/* Clear the interrupt and enable all interrupts again */
	writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
	/* Could we write the value of SMU_ISR_COMPLETION? */
	writel(0xFF000000, &scic->smu_registers->interrupt_mask);
	writel(0, &scic->smu_registers->interrupt_mask);
}

/*
 * Top-half error-interrupt check: returns true when a queue error/suspend
 * condition is pending; otherwise toggles the error interrupt mask to avoid
 * the same hardware notification race as scic_sds_controller_isr().
 */
bool scic_sds_controller_error_isr(struct scic_sds_controller *scic)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&scic->smu_registers->interrupt_status);
	interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);

	if (interrupt_status != 0) {
		/*
		 * There is an error interrupt pending so let it through and handle
		 * in the callback */
		return true;
	}

	/*
	 * There is a race in the hardware that could cause us not to be notified
	 * of an interrupt completion if we do not take this step.  We will mask
	 * then unmask the error interrupts so if there was another interrupt
	 * pending we will be notified.
	 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)?
	 */
	writel(0xff, &scic->smu_registers->interrupt_mask);
	writel(0, &scic->smu_registers->interrupt_mask);

	return false;
}

/*
 * Bottom-half error handler: a queue-suspend with pending completions is
 * recoverable (drain and acknowledge); anything else moves the controller
 * state machine to FAILED.
 */
void scic_sds_controller_error_handler(struct scic_sds_controller *scic)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&scic->smu_registers->interrupt_status);

	if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
	    scic_sds_controller_completion_queue_has_entries(scic)) {

		scic_sds_controller_process_completions(scic);
		writel(SMU_ISR_QUEUE_SUSPEND, &scic->smu_registers->interrupt_status);
	} else {
		dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__,
			interrupt_status);

		sci_base_state_machine_change_state(&scic->state_machine,
						    SCI_BASE_CONTROLLER_STATE_FAILED);

		return;
	}

	/* If we dont process any completions I am not sure that we want to do this.
	 * We are in the middle of a hardware fault and should probably be reset.
	 */
	writel(0, &scic->smu_registers->interrupt_mask);
}




/*
 * Handle a link-up notification from a phy: forward it to the port agent
 * and, while the controller is still STARTING, cancel the phy-startup
 * timeout and kick off the next phy in the start sequence.
 */
void scic_sds_controller_link_up(struct scic_sds_controller *scic,
				 struct scic_sds_port *port, struct scic_sds_phy *phy)
{
	switch (scic->state_machine.current_state_id) {
	case SCI_BASE_CONTROLLER_STATE_STARTING:
		scic_sds_controller_phy_timer_stop(scic);
		scic->port_agent.link_up_handler(scic, &scic->port_agent,
						 port, phy);
		scic_sds_controller_start_next_phy(scic);
		break;
	case SCI_BASE_CONTROLLER_STATE_READY:
		scic->port_agent.link_up_handler(scic, &scic->port_agent,
						 port, phy);
		break;
	default:
		dev_dbg(scic_to_dev(scic),
			"%s: SCIC Controller linkup event from phy %d in "
			"unexpected state %d\n", __func__, phy->phy_index,
			scic->state_machine.current_state_id);
	}
}

/*
 * Handle a link-down notification from a phy by forwarding it to the port
 * agent; only meaningful while STARTING or READY.
 */
void scic_sds_controller_link_down(struct scic_sds_controller *scic,
				   struct scic_sds_port *port, struct scic_sds_phy *phy)
{
	switch (scic->state_machine.current_state_id) {
	case SCI_BASE_CONTROLLER_STATE_STARTING:
	case SCI_BASE_CONTROLLER_STATE_READY:
		scic->port_agent.link_down_handler(scic, &scic->port_agent,
						   port, phy);
		break;
	default:
		dev_dbg(scic_to_dev(scic),
			"%s: SCIC Controller linkdown event from phy %d in "
			"unexpected state %d\n",
			__func__,
			phy->phy_index,
			scic->state_machine.current_state_id);
	}
}

/**
 * This is a helper method to determine if any remote devices on this
 * controller are still in the stopping state.
 *
 */
static bool scic_sds_controller_has_remote_devices_stopping(
	struct scic_sds_controller *controller)
{
	u32 index;

	for (index = 0; index < controller->remote_node_entries; index++) {
		if ((controller->device_table[index] != NULL) &&
		    (controller->device_table[index]->state_machine.current_state_id
		     == SCI_BASE_REMOTE_DEVICE_STATE_STOPPING))
			return true;
	}

	return false;
}

/**
 * This method is called by the remote device to inform the controller
 * object that the remote device has stopped.  Once the last stopping device
 * has reported in, the controller transitions from STOPPING to STOPPED.
 */
void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic,
					       struct scic_sds_remote_device *sci_dev)
{
	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_STOPPING) {
		dev_dbg(scic_to_dev(scic),
			"SCIC Controller 0x%p remote device stopped event "
			"from device 0x%p in unexpected state %d\n",
			scic, sci_dev,
			scic->state_machine.current_state_id);
		return;
	}

	if (!scic_sds_controller_has_remote_devices_stopping(scic)) {
		sci_base_state_machine_change_state(&scic->state_machine,
						    SCI_BASE_CONTROLLER_STATE_STOPPED);
	}
}

/**
 * This method will write to the SCU PCP register the request value.  The method
 * is used to suspend/resume ports, devices, and phys.
 * @scic:
 *
 *
 */
void scic_sds_controller_post_request(
	struct scic_sds_controller *scic,
	u32 request)
{
	dev_dbg(scic_to_dev(scic),
		"%s: SCIC Controller 0x%p post request 0x%08x\n",
		__func__,
		scic,
		request);

	writel(request, &scic->smu_registers->post_context_port);
}

/**
 * This method will copy the soft copy of the task context into the physical
 * memory accessible by the controller.
 * @scic: This parameter specifies the controller for which to copy
 *    the task context.
 * @sci_req: This parameter specifies the request for which the task
 *    context is being copied.
 *
 * After this call is made the SCIC_SDS_IO_REQUEST object will always point to
 * the physical memory version of the task context.  Thus, all subsequent
 * updates to the task context are performed in the TC table (i.e. DMAable
 * memory).  none
 */
void scic_sds_controller_copy_task_context(
	struct scic_sds_controller *scic,
	struct scic_sds_request *sci_req)
{
	struct scu_task_context *task_context_buffer;

	task_context_buffer = scic_sds_controller_get_task_context_buffer(
		scic, sci_req->io_tag);

	/* Only the fields up to the SGL snapshot are copied. */
	memcpy(task_context_buffer,
	       sci_req->task_context_buffer,
	       offsetof(struct scu_task_context, sgl_snapshot_ac));

	/*
	 * Now that the soft copy of the TC has been copied into the TC
	 * table accessible by the silicon.  Thus, any further changes to
	 * the TC (e.g. TC termination) occur in the appropriate location. */
	sci_req->task_context_buffer = task_context_buffer;
}

/**
 * This method returns the task context buffer for the given io tag, or NULL
 * when the tag's task index is out of range for this controller.
 * @scic:
 * @io_tag:
 *
 * struct scu_task_context*
 */
struct scu_task_context *scic_sds_controller_get_task_context_buffer(
	struct scic_sds_controller *scic,
	u16 io_tag
	) {
	u16 task_index = scic_sds_io_tag_get_index(io_tag);

	if (task_index < scic->task_context_entries) {
		return &scic->task_context_table[task_index];
	}

	return NULL;
}

/*
 * Look up the active request for an io tag.  Returns NULL unless the tag's
 * index is in range, a request is registered there, and the tag's sequence
 * number matches the controller's current sequence for that index (i.e. the
 * tag has not been recycled).
 */
struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic,
					     u16 io_tag)
{
	u16 task_index;
	u16 task_sequence;

	task_index = scic_sds_io_tag_get_index(io_tag);

	if (task_index < scic->task_context_entries) {
		if (scic->io_request_table[task_index] != NULL) {
			task_sequence = scic_sds_io_tag_get_sequence(io_tag);

			if (task_sequence == scic->io_request_sequence[task_index]) {
				return scic->io_request_table[task_index];
			}
		}
	}

	return NULL;
}

/**
 * This method allocates remote node index and the reserves the remote node
 * context space for use.  This method can fail if there are no more remote
 * node index available.
 * @scic: This is the controller object which contains the set of
 *    free remote node ids
 * @sci_dev: This is the device object which is requesting the a remote node
 *    id
 * @node_id: This is the remote node id that is assigned to the device if one
 *    is available
 *
 * enum sci_status SCI_FAILURE_OUT_OF_RESOURCES if there are no available remote
 * node index available.
 */
enum sci_status scic_sds_controller_allocate_remote_node_context(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev,
	u16 *node_id)
{
	u16 node_index;
	u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);

	node_index = scic_sds_remote_node_table_allocate_remote_node(
		&scic->available_remote_nodes, remote_node_count
		);

	if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
		scic->device_table[node_index] = sci_dev;

		*node_id = node_index;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}

/**
 * This method frees the remote node index back to the available pool.  Once
 * this is done the remote node context buffer is no longer valid and can
 * not be used.  The release only occurs when @sci_dev still owns @node_id.
 * @scic:
 * @sci_dev:
 * @node_id:
 *
 */
void scic_sds_controller_free_remote_node_context(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev,
	u16 node_id)
{
	u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);

	if (scic->device_table[node_id] == sci_dev) {
		scic->device_table[node_id] = NULL;

		scic_sds_remote_node_table_release_remote_node_index(
			&scic->available_remote_nodes, remote_node_count, node_id
			);
	}
}

/**
 * This method returns the union scu_remote_node_context for the specified remote
 * node id, or NULL when the id is out of range or no device owns it.
 * @scic:
 * @node_id:
 *
 * union scu_remote_node_context*
 */
union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
	struct scic_sds_controller *scic,
	u16 node_id
	) {
	if (
		(node_id < scic->remote_node_entries)
		&& (scic->device_table[node_id] != NULL)
		) {
		return &scic->remote_node_context_table[node_id];
	}

	return NULL;
}

/**
 *
 * @response_buffer: This is the buffer into which the D2H register FIS will be
 *    constructed.
 * @frame_header: This is the frame header returned by the hardware.
 * @frame_buffer: This is the frame buffer returned by the hardware.
 *
 * This method will combine the frame header (first dword) and frame buffer
 * (remainder) to create a SATA D2H register FIS.  none
 */
void scic_sds_controller_copy_sata_response(
	void *response_buffer,
	void *frame_header,
	void *frame_buffer)
{
	memcpy(response_buffer, frame_header, sizeof(u32));

	memcpy(response_buffer + sizeof(u32),
	       frame_buffer,
	       sizeof(struct dev_to_host_fis) - sizeof(u32));
}

/**
 * This method releases the frame once this is done the frame is available for
 * re-use by the hardware.  The data contained in the frame header and frame
 * buffer is no longer valid.  The UF queue get pointer is only updated if UF
 * control indicates this is appropriate.
1736 - * @scic: 1737 - * @frame_index: 1738 - * 1739 - */ 1740 - void scic_sds_controller_release_frame( 1741 - struct scic_sds_controller *scic, 1742 - u32 frame_index) 1743 - { 1744 - if (scic_sds_unsolicited_frame_control_release_frame( 1745 - &scic->uf_control, frame_index) == true) 1746 - writel(scic->uf_control.get, 1747 - &scic->scu_registers->sdma.unsolicited_frame_get_pointer); 1748 - } 1749 - 1750 - /** 1751 - * This method sets user parameters and OEM parameters to default values. 1752 - * Users can override these values utilizing the scic_user_parameters_set() 1753 - * and scic_oem_parameters_set() methods. 1754 - * @scic: This parameter specifies the controller for which to set the 1755 - * configuration parameters to their default values. 1756 - * 1757 - */ 1758 - static void scic_sds_controller_set_default_config_parameters(struct scic_sds_controller *scic) 1759 - { 1760 - struct isci_host *ihost = scic_to_ihost(scic); 1761 - u16 index; 1762 - 1763 - /* Default to APC mode. */ 1764 - scic->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; 1765 - 1766 - /* Default to APC mode. */ 1767 - scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1; 1768 - 1769 - /* Default to no SSC operation. */ 1770 - scic->oem_parameters.sds1.controller.do_enable_ssc = false; 1771 - 1772 - /* Initialize all of the port parameter information to narrow ports. */ 1773 - for (index = 0; index < SCI_MAX_PORTS; index++) { 1774 - scic->oem_parameters.sds1.ports[index].phy_mask = 0; 1775 - } 1776 - 1777 - /* Initialize all of the phy parameter information. */ 1778 - for (index = 0; index < SCI_MAX_PHYS; index++) { 1779 - /* Default to 6G (i.e. Gen 3) for now. 
*/ 1780 - scic->user_parameters.sds1.phys[index].max_speed_generation = 3; 1781 - 1782 - /* the frequencies cannot be 0 */ 1783 - scic->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f; 1784 - scic->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff; 1785 - scic->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; 1786 - 1787 - /* 1788 - * Previous Vitesse based expanders had a arbitration issue that 1789 - * is worked around by having the upper 32-bits of SAS address 1790 - * with a value greater then the Vitesse company identifier. 1791 - * Hence, usage of 0x5FCFFFFF. */ 1792 - scic->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id; 1793 - scic->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF; 1794 - } 1795 - 1796 - scic->user_parameters.sds1.stp_inactivity_timeout = 5; 1797 - scic->user_parameters.sds1.ssp_inactivity_timeout = 5; 1798 - scic->user_parameters.sds1.stp_max_occupancy_timeout = 5; 1799 - scic->user_parameters.sds1.ssp_max_occupancy_timeout = 20; 1800 - scic->user_parameters.sds1.no_outbound_task_timeout = 20; 1801 - } 1802 - 1803 - /** 1804 - * scic_controller_get_suggested_start_timeout() - This method returns the 1805 - * suggested scic_controller_start() timeout amount. The user is free to 1806 - * use any timeout value, but this method provides the suggested minimum 1807 - * start timeout value. The returned value is based upon empirical 1808 - * information determined as a result of interoperability testing. 1809 - * @controller: the handle to the controller object for which to return the 1810 - * suggested start timeout. 1811 - * 1812 - * This method returns the number of milliseconds for the suggested start 1813 - * operation timeout. 1814 - */ 1815 - u32 scic_controller_get_suggested_start_timeout( 1816 - struct scic_sds_controller *sc) 1817 - { 1818 - /* Validate the user supplied parameters. 
*/ 1819 - if (sc == NULL) 1820 - return 0; 1821 - 1822 - /* 1823 - * The suggested minimum timeout value for a controller start operation: 1824 - * 1825 - * Signature FIS Timeout 1826 - * + Phy Start Timeout 1827 - * + Number of Phy Spin Up Intervals 1828 - * --------------------------------- 1829 - * Number of milliseconds for the controller start operation. 1830 - * 1831 - * NOTE: The number of phy spin up intervals will be equivalent 1832 - * to the number of phys divided by the number phys allowed 1833 - * per interval - 1 (once OEM parameters are supported). 1834 - * Currently we assume only 1 phy per interval. */ 1835 - 1836 - return SCIC_SDS_SIGNATURE_FIS_TIMEOUT 1837 - + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 1838 - + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); 1839 - } 1840 - 1841 - /** 1842 - * scic_controller_stop() - This method will stop an individual controller 1843 - * object.This method will invoke the associated user callback upon 1844 - * completion. The completion callback is called when the following 1845 - * conditions are met: -# the method return status is SCI_SUCCESS. -# the 1846 - * controller has been quiesced. This method will ensure that all IO 1847 - * requests are quiesced, phys are stopped, and all additional operation by 1848 - * the hardware is halted. 1849 - * @controller: the handle to the controller object to stop. 1850 - * @timeout: This parameter specifies the number of milliseconds in which the 1851 - * stop operation should complete. 1852 - * 1853 - * The controller must be in the STARTED or STOPPED state. Indicate if the 1854 - * controller stop method succeeded or failed in some way. SCI_SUCCESS if the 1855 - * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the 1856 - * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the 1857 - * controller is not either in the STARTED or STOPPED states. 
1858 - */ 1859 - enum sci_status scic_controller_stop( 1860 - struct scic_sds_controller *scic, 1861 - u32 timeout) 1862 - { 1863 - if (scic->state_machine.current_state_id != 1864 - SCI_BASE_CONTROLLER_STATE_READY) { 1865 - dev_warn(scic_to_dev(scic), 1866 - "SCIC Controller stop operation requested in " 1867 - "invalid state\n"); 1868 - return SCI_FAILURE_INVALID_STATE; 1869 - } 1870 - 1871 - isci_timer_start(scic->timeout_timer, timeout); 1872 - sci_base_state_machine_change_state(&scic->state_machine, 1873 - SCI_BASE_CONTROLLER_STATE_STOPPING); 1874 - return SCI_SUCCESS; 1875 - } 1876 - 1877 - /** 1878 - * scic_controller_reset() - This method will reset the supplied core 1879 - * controller regardless of the state of said controller. This operation is 1880 - * considered destructive. In other words, all current operations are wiped 1881 - * out. No IO completions for outstanding devices occur. Outstanding IO 1882 - * requests are not aborted or completed at the actual remote device. 1883 - * @controller: the handle to the controller object to reset. 1884 - * 1885 - * Indicate if the controller reset method succeeded or failed in some way. 1886 - * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if 1887 - * the controller reset operation is unable to complete. 1888 - */ 1889 - enum sci_status scic_controller_reset( 1890 - struct scic_sds_controller *scic) 1891 - { 1892 - switch (scic->state_machine.current_state_id) { 1893 - case SCI_BASE_CONTROLLER_STATE_RESET: 1894 - case SCI_BASE_CONTROLLER_STATE_READY: 1895 - case SCI_BASE_CONTROLLER_STATE_STOPPED: 1896 - case SCI_BASE_CONTROLLER_STATE_FAILED: 1897 - /* 1898 - * The reset operation is not a graceful cleanup, just 1899 - * perform the state transition. 
1900 - */ 1901 - sci_base_state_machine_change_state(&scic->state_machine, 1902 - SCI_BASE_CONTROLLER_STATE_RESETTING); 1903 - return SCI_SUCCESS; 1904 - default: 1905 - dev_warn(scic_to_dev(scic), 1906 - "SCIC Controller reset operation requested in " 1907 - "invalid state\n"); 1908 - return SCI_FAILURE_INVALID_STATE; 1909 - } 1910 - } 1911 - 1912 - /** 1913 - * scic_controller_start_io() - This method is called by the SCI user to 1914 - * send/start an IO request. If the method invocation is successful, then 1915 - * the IO request has been queued to the hardware for processing. 1916 - * @controller: the handle to the controller object for which to start an IO 1917 - * request. 1918 - * @remote_device: the handle to the remote device object for which to start an 1919 - * IO request. 1920 - * @io_request: the handle to the io request object to start. 1921 - * @io_tag: This parameter specifies a previously allocated IO tag that the 1922 - * user desires to be utilized for this request. This parameter is optional. 1923 - * The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG as the value 1924 - * for this parameter. 1925 - * 1926 - * - IO tags are a protected resource. It is incumbent upon the SCI Core user 1927 - * to ensure that each of the methods that may allocate or free available IO 1928 - * tags are handled in a mutually exclusive manner. This method is one of said 1929 - * methods requiring proper critical code section protection (e.g. semaphore, 1930 - * spin-lock, etc.). - For SATA, the user is required to manage NCQ tags. As a 1931 - * result, it is expected the user will have set the NCQ tag field in the host 1932 - * to device register FIS prior to calling this method. There is also a 1933 - * requirement for the user to call scic_stp_io_set_ncq_tag() prior to invoking 1934 - * the scic_controller_start_io() method. scic_controller_allocate_tag() for 1935 - * more information on allocating a tag. 
Indicate if the controller 1936 - * successfully started the IO request. SCI_SUCCESS if the IO request was 1937 - * successfully started. Determine the failure situations and return values. 1938 - */ 1939 - enum sci_status scic_controller_start_io( 1940 - struct scic_sds_controller *scic, 1941 - struct scic_sds_remote_device *rdev, 1942 - struct scic_sds_request *req, 1943 - u16 io_tag) 1944 - { 1945 - enum sci_status status; 1946 - 1947 - if (scic->state_machine.current_state_id != 1948 - SCI_BASE_CONTROLLER_STATE_READY) { 1949 - dev_warn(scic_to_dev(scic), "invalid state to start I/O"); 1950 - return SCI_FAILURE_INVALID_STATE; 1951 - } 1952 - 1953 - status = scic_sds_remote_device_start_io(scic, rdev, req); 1954 - if (status != SCI_SUCCESS) 1955 - return status; 1956 - 1957 - scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req; 1958 - scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(req)); 1959 - return SCI_SUCCESS; 1960 - } 1961 - 1962 - /** 1963 - * scic_controller_terminate_request() - This method is called by the SCI Core 1964 - * user to terminate an ongoing (i.e. started) core IO request. This does 1965 - * not abort the IO request at the target, but rather removes the IO request 1966 - * from the host controller. 1967 - * @controller: the handle to the controller object for which to terminate a 1968 - * request. 1969 - * @remote_device: the handle to the remote device object for which to 1970 - * terminate a request. 1971 - * @request: the handle to the io or task management request object to 1972 - * terminate. 1973 - * 1974 - * Indicate if the controller successfully began the terminate process for the 1975 - * IO request. SCI_SUCCESS if the terminate process was successfully started 1976 - * for the request. Determine the failure situations and return values. 
1977 - */ 1978 - enum sci_status scic_controller_terminate_request( 1979 - struct scic_sds_controller *scic, 1980 - struct scic_sds_remote_device *rdev, 1981 - struct scic_sds_request *req) 1982 - { 1983 - enum sci_status status; 1984 - 1985 - if (scic->state_machine.current_state_id != 1986 - SCI_BASE_CONTROLLER_STATE_READY) { 1987 - dev_warn(scic_to_dev(scic), 1988 - "invalid state to terminate request\n"); 1989 - return SCI_FAILURE_INVALID_STATE; 1990 - } 1991 - 1992 - status = scic_sds_io_request_terminate(req); 1993 - if (status != SCI_SUCCESS) 1994 - return status; 1995 - 1996 - /* 1997 - * Utilize the original post context command and or in the POST_TC_ABORT 1998 - * request sub-type. 1999 - */ 2000 - scic_sds_controller_post_request(scic, 2001 - scic_sds_request_get_post_context(req) | 2002 - SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); 2003 - return SCI_SUCCESS; 2004 - } 2005 - 2006 - /** 2007 - * scic_controller_complete_io() - This method will perform core specific 2008 - * completion operations for an IO request. After this method is invoked, 2009 - * the user should consider the IO request as invalid until it is properly 2010 - * reused (i.e. re-constructed). 2011 - * @controller: The handle to the controller object for which to complete the 2012 - * IO request. 2013 - * @remote_device: The handle to the remote device object for which to complete 2014 - * the IO request. 2015 - * @io_request: the handle to the io request object to complete. 2016 - * 2017 - * - IO tags are a protected resource. It is incumbent upon the SCI Core user 2018 - * to ensure that each of the methods that may allocate or free available IO 2019 - * tags are handled in a mutually exclusive manner. This method is one of said 2020 - * methods requiring proper critical code section protection (e.g. semaphore, 2021 - * spin-lock, etc.). 
- If the IO tag for a request was allocated, by the SCI 2022 - * Core user, using the scic_controller_allocate_io_tag() method, then it is 2023 - * the responsibility of the caller to invoke the scic_controller_free_io_tag() 2024 - * method to free the tag (i.e. this method will not free the IO tag). Indicate 2025 - * if the controller successfully completed the IO request. SCI_SUCCESS if the 2026 - * completion process was successful. 2027 - */ 2028 - enum sci_status scic_controller_complete_io( 2029 - struct scic_sds_controller *scic, 2030 - struct scic_sds_remote_device *rdev, 2031 - struct scic_sds_request *request) 2032 - { 2033 - enum sci_status status; 2034 - u16 index; 2035 - 2036 - switch (scic->state_machine.current_state_id) { 2037 - case SCI_BASE_CONTROLLER_STATE_STOPPING: 2038 - /* XXX: Implement this function */ 2039 - return SCI_FAILURE; 2040 - case SCI_BASE_CONTROLLER_STATE_READY: 2041 - status = scic_sds_remote_device_complete_io(scic, rdev, request); 2042 - if (status != SCI_SUCCESS) 2043 - return status; 2044 - 2045 - index = scic_sds_io_tag_get_index(request->io_tag); 2046 - scic->io_request_table[index] = NULL; 2047 - return SCI_SUCCESS; 2048 - default: 2049 - dev_warn(scic_to_dev(scic), "invalid state to complete I/O"); 2050 - return SCI_FAILURE_INVALID_STATE; 2051 - } 2052 - 2053 - } 2054 - 2055 - enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req) 2056 - { 2057 - struct scic_sds_controller *scic = sci_req->owning_controller; 2058 - 2059 - if (scic->state_machine.current_state_id != 2060 - SCI_BASE_CONTROLLER_STATE_READY) { 2061 - dev_warn(scic_to_dev(scic), "invalid state to continue I/O"); 2062 - return SCI_FAILURE_INVALID_STATE; 2063 - } 2064 - 2065 - scic->io_request_table[scic_sds_io_tag_get_index(sci_req->io_tag)] = sci_req; 2066 - scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(sci_req)); 2067 - return SCI_SUCCESS; 2068 - } 2069 - 2070 - /** 2071 - * scic_controller_start_task() - This 
method is called by the SCIC user to 2072 - * send/start a framework task management request. 2073 - * @controller: the handle to the controller object for which to start the task 2074 - * management request. 2075 - * @remote_device: the handle to the remote device object for which to start 2076 - * the task management request. 2077 - * @task_request: the handle to the task request object to start. 2078 - * @io_tag: This parameter specifies a previously allocated IO tag that the 2079 - * user desires to be utilized for this request. Note this not the io_tag 2080 - * of the request being managed. It is to be utilized for the task request 2081 - * itself. This parameter is optional. The user is allowed to supply 2082 - * SCI_CONTROLLER_INVALID_IO_TAG as the value for this parameter. 2083 - * 2084 - * - IO tags are a protected resource. It is incumbent upon the SCI Core user 2085 - * to ensure that each of the methods that may allocate or free available IO 2086 - * tags are handled in a mutually exclusive manner. This method is one of said 2087 - * methods requiring proper critical code section protection (e.g. semaphore, 2088 - * spin-lock, etc.). - The user must synchronize this task with completion 2089 - * queue processing. If they are not synchronized then it is possible for the 2090 - * io requests that are being managed by the task request can complete before 2091 - * starting the task request. scic_controller_allocate_tag() for more 2092 - * information on allocating a tag. Indicate if the controller successfully 2093 - * started the IO request. SCI_TASK_SUCCESS if the task request was 2094 - * successfully started. SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT This value is 2095 - * returned if there is/are task(s) outstanding that require termination or 2096 - * completion before this request can succeed. 
2097 - */ 2098 - enum sci_task_status scic_controller_start_task( 2099 - struct scic_sds_controller *scic, 2100 - struct scic_sds_remote_device *rdev, 2101 - struct scic_sds_request *req, 2102 - u16 task_tag) 2103 - { 2104 - enum sci_status status; 2105 - 2106 - if (scic->state_machine.current_state_id != 2107 - SCI_BASE_CONTROLLER_STATE_READY) { 2108 - dev_warn(scic_to_dev(scic), 2109 - "%s: SCIC Controller starting task from invalid " 2110 - "state\n", 2111 - __func__); 2112 - return SCI_TASK_FAILURE_INVALID_STATE; 2113 - } 2114 - 2115 - status = scic_sds_remote_device_start_task(scic, rdev, req); 2116 - switch (status) { 2117 - case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: 2118 - scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req; 2119 - 2120 - /* 2121 - * We will let framework know this task request started successfully, 2122 - * although core is still woring on starting the request (to post tc when 2123 - * RNC is resumed.) 2124 - */ 2125 - return SCI_SUCCESS; 2126 - case SCI_SUCCESS: 2127 - scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req; 2128 - 2129 - scic_sds_controller_post_request(scic, 2130 - scic_sds_request_get_post_context(req)); 2131 - break; 2132 - default: 2133 - break; 2134 - } 2135 - 2136 - return status; 2137 - } 2138 - 2139 - /** 2140 - * scic_controller_allocate_io_tag() - This method will allocate a tag from the 2141 - * pool of free IO tags. Direct allocation of IO tags by the SCI Core user 2142 - * is optional. The scic_controller_start_io() method will allocate an IO 2143 - * tag if this method is not utilized and the tag is not supplied to the IO 2144 - * construct routine. Direct allocation of IO tags may provide additional 2145 - * performance improvements in environments capable of supporting this usage 2146 - * model. Additionally, direct allocation of IO tags also provides 2147 - * additional flexibility to the SCI Core user. 
Specifically, the user may 2148 - * retain IO tags across the lives of multiple IO requests. 2149 - * @controller: the handle to the controller object for which to allocate the 2150 - * tag. 2151 - * 2152 - * IO tags are a protected resource. It is incumbent upon the SCI Core user to 2153 - * ensure that each of the methods that may allocate or free available IO tags 2154 - * are handled in a mutually exclusive manner. This method is one of said 2155 - * methods requiring proper critical code section protection (e.g. semaphore, 2156 - * spin-lock, etc.). An unsigned integer representing an available IO tag. 2157 - * SCI_CONTROLLER_INVALID_IO_TAG This value is returned if there are no 2158 - * currently available tags to be allocated. All return other values indicate a 2159 - * legitimate tag. 2160 - */ 2161 - u16 scic_controller_allocate_io_tag( 2162 - struct scic_sds_controller *scic) 2163 - { 2164 - u16 task_context; 2165 - u16 sequence_count; 2166 - 2167 - if (!sci_pool_empty(scic->tci_pool)) { 2168 - sci_pool_get(scic->tci_pool, task_context); 2169 - 2170 - sequence_count = scic->io_request_sequence[task_context]; 2171 - 2172 - return scic_sds_io_tag_construct(sequence_count, task_context); 2173 - } 2174 - 2175 - return SCI_CONTROLLER_INVALID_IO_TAG; 2176 - } 2177 - 2178 - /** 2179 - * scic_controller_free_io_tag() - This method will free an IO tag to the pool 2180 - * of free IO tags. This method provides the SCI Core user more flexibility 2181 - * with regards to IO tags. The user may desire to keep an IO tag after an 2182 - * IO request has completed, because they plan on re-using the tag for a 2183 - * subsequent IO request. This method is only legal if the tag was 2184 - * allocated via scic_controller_allocate_io_tag(). 2185 - * @controller: This parameter specifies the handle to the controller object 2186 - * for which to free/return the tag. 2187 - * @io_tag: This parameter represents the tag to be freed to the pool of 2188 - * available tags. 
2189 - * 2190 - * - IO tags are a protected resource. It is incumbent upon the SCI Core user 2191 - * to ensure that each of the methods that may allocate or free available IO 2192 - * tags are handled in a mutually exclusive manner. This method is one of said 2193 - * methods requiring proper critical code section protection (e.g. semaphore, 2194 - * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI 2195 - * Core user, using the scic_controller_allocate_io_tag() method, then it is 2196 - * the responsibility of the caller to invoke this method to free the tag. This 2197 - * method returns an indication of whether the tag was successfully put back 2198 - * (freed) to the pool of available tags. SCI_SUCCESS This return value 2199 - * indicates the tag was successfully placed into the pool of available IO 2200 - * tags. SCI_FAILURE_INVALID_IO_TAG This value is returned if the supplied tag 2201 - * is not a valid IO tag value. 2202 - */ 2203 - enum sci_status scic_controller_free_io_tag( 2204 - struct scic_sds_controller *scic, 2205 - u16 io_tag) 2206 - { 2207 - u16 sequence; 2208 - u16 index; 2209 - 2210 - BUG_ON(io_tag == SCI_CONTROLLER_INVALID_IO_TAG); 2211 - 2212 - sequence = scic_sds_io_tag_get_sequence(io_tag); 2213 - index = scic_sds_io_tag_get_index(io_tag); 2214 - 2215 - if (!sci_pool_full(scic->tci_pool)) { 2216 - if (sequence == scic->io_request_sequence[index]) { 2217 - scic_sds_io_sequence_increment( 2218 - scic->io_request_sequence[index]); 2219 - 2220 - sci_pool_put(scic->tci_pool, index); 2221 - 2222 - return SCI_SUCCESS; 2223 - } 2224 - } 2225 - 2226 - return SCI_FAILURE_INVALID_IO_TAG; 2227 - } 2228 - 2229 - void scic_controller_enable_interrupts( 2230 - struct scic_sds_controller *scic) 2231 - { 2232 - BUG_ON(scic->smu_registers == NULL); 2233 - writel(0, &scic->smu_registers->interrupt_mask); 2234 - } 2235 - 2236 - void scic_controller_disable_interrupts( 2237 - struct scic_sds_controller *scic) 2238 - { 2239 - 
BUG_ON(scic->smu_registers == NULL); 2240 - writel(0xffffffff, &scic->smu_registers->interrupt_mask); 2241 - } 2242 - 2243 - static enum sci_status scic_controller_set_mode( 2244 - struct scic_sds_controller *scic, 2245 - enum sci_controller_mode operating_mode) 2246 - { 2247 - enum sci_status status = SCI_SUCCESS; 2248 - 2249 - if ((scic->state_machine.current_state_id == 2250 - SCI_BASE_CONTROLLER_STATE_INITIALIZING) || 2251 - (scic->state_machine.current_state_id == 2252 - SCI_BASE_CONTROLLER_STATE_INITIALIZED)) { 2253 - switch (operating_mode) { 2254 - case SCI_MODE_SPEED: 2255 - scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES; 2256 - scic->task_context_entries = SCU_IO_REQUEST_COUNT; 2257 - scic->uf_control.buffers.count = 2258 - SCU_UNSOLICITED_FRAME_COUNT; 2259 - scic->completion_event_entries = SCU_EVENT_COUNT; 2260 - scic->completion_queue_entries = 2261 - SCU_COMPLETION_QUEUE_COUNT; 2262 - break; 2263 - 2264 - case SCI_MODE_SIZE: 2265 - scic->remote_node_entries = SCI_MIN_REMOTE_DEVICES; 2266 - scic->task_context_entries = SCI_MIN_IO_REQUESTS; 2267 - scic->uf_control.buffers.count = 2268 - SCU_MIN_UNSOLICITED_FRAMES; 2269 - scic->completion_event_entries = SCU_MIN_EVENTS; 2270 - scic->completion_queue_entries = 2271 - SCU_MIN_COMPLETION_QUEUE_ENTRIES; 2272 - break; 2273 - 2274 - default: 2275 - status = SCI_FAILURE_INVALID_PARAMETER_VALUE; 2276 - break; 2277 - } 2278 - } else 2279 - status = SCI_FAILURE_INVALID_STATE; 2280 - 2281 - return status; 2282 - } 2283 - 2284 - /** 2285 - * scic_sds_controller_reset_hardware() - 2286 - * 2287 - * This method will reset the controller hardware. 
2288 - */ 2289 - static void scic_sds_controller_reset_hardware( 2290 - struct scic_sds_controller *scic) 2291 - { 2292 - /* Disable interrupts so we dont take any spurious interrupts */ 2293 - scic_controller_disable_interrupts(scic); 2294 - 2295 - /* Reset the SCU */ 2296 - writel(0xFFFFFFFF, &scic->smu_registers->soft_reset_control); 2297 - 2298 - /* Delay for 1ms to before clearing the CQP and UFQPR. */ 2299 - udelay(1000); 2300 - 2301 - /* The write to the CQGR clears the CQP */ 2302 - writel(0x00000000, &scic->smu_registers->completion_queue_get); 2303 - 2304 - /* The write to the UFQGP clears the UFQPR */ 2305 - writel(0, &scic->scu_registers->sdma.unsolicited_frame_get_pointer); 2306 - } 2307 - 2308 - enum sci_status scic_user_parameters_set( 2309 - struct scic_sds_controller *scic, 2310 - union scic_user_parameters *scic_parms) 2311 - { 2312 - u32 state = scic->state_machine.current_state_id; 2313 - 2314 - if (state == SCI_BASE_CONTROLLER_STATE_RESET || 2315 - state == SCI_BASE_CONTROLLER_STATE_INITIALIZING || 2316 - state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) { 2317 - u16 index; 2318 - 2319 - /* 2320 - * Validate the user parameters. If they are not legal, then 2321 - * return a failure. 
2322 - */ 2323 - for (index = 0; index < SCI_MAX_PHYS; index++) { 2324 - struct sci_phy_user_params *user_phy; 2325 - 2326 - user_phy = &scic_parms->sds1.phys[index]; 2327 - 2328 - if (!((user_phy->max_speed_generation <= 2329 - SCIC_SDS_PARM_MAX_SPEED) && 2330 - (user_phy->max_speed_generation > 2331 - SCIC_SDS_PARM_NO_SPEED))) 2332 - return SCI_FAILURE_INVALID_PARAMETER_VALUE; 2333 - 2334 - if (user_phy->in_connection_align_insertion_frequency < 2335 - 3) 2336 - return SCI_FAILURE_INVALID_PARAMETER_VALUE; 2337 - 2338 - if ((user_phy->in_connection_align_insertion_frequency < 2339 - 3) || 2340 - (user_phy->align_insertion_frequency == 0) || 2341 - (user_phy-> 2342 - notify_enable_spin_up_insertion_frequency == 2343 - 0)) 2344 - return SCI_FAILURE_INVALID_PARAMETER_VALUE; 2345 - } 2346 - 2347 - if ((scic_parms->sds1.stp_inactivity_timeout == 0) || 2348 - (scic_parms->sds1.ssp_inactivity_timeout == 0) || 2349 - (scic_parms->sds1.stp_max_occupancy_timeout == 0) || 2350 - (scic_parms->sds1.ssp_max_occupancy_timeout == 0) || 2351 - (scic_parms->sds1.no_outbound_task_timeout == 0)) 2352 - return SCI_FAILURE_INVALID_PARAMETER_VALUE; 2353 - 2354 - memcpy(&scic->user_parameters, scic_parms, sizeof(*scic_parms)); 2355 - 2356 - return SCI_SUCCESS; 2357 - } 2358 - 2359 - return SCI_FAILURE_INVALID_STATE; 2360 - } 2361 - 2362 - int scic_oem_parameters_validate(struct scic_sds_oem_params *oem) 2363 - { 2364 - int i; 2365 - 2366 - for (i = 0; i < SCI_MAX_PORTS; i++) 2367 - if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX) 2368 - return -EINVAL; 2369 - 2370 - for (i = 0; i < SCI_MAX_PHYS; i++) 2371 - if (oem->phys[i].sas_address.high == 0 && 2372 - oem->phys[i].sas_address.low == 0) 2373 - return -EINVAL; 2374 - 2375 - if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) { 2376 - for (i = 0; i < SCI_MAX_PHYS; i++) 2377 - if (oem->ports[i].phy_mask != 0) 2378 - return -EINVAL; 2379 - } else if (oem->controller.mode_type == 
SCIC_PORT_MANUAL_CONFIGURATION_MODE) { 2380 - u8 phy_mask = 0; 2381 - 2382 - for (i = 0; i < SCI_MAX_PHYS; i++) 2383 - phy_mask |= oem->ports[i].phy_mask; 2384 - 2385 - if (phy_mask == 0) 2386 - return -EINVAL; 2387 - } else 2388 - return -EINVAL; 2389 - 2390 - if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT) 2391 - return -EINVAL; 2392 - 2393 - return 0; 2394 - } 2395 - 2396 - enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic, 2397 - union scic_oem_parameters *scic_parms) 2398 - { 2399 - u32 state = scic->state_machine.current_state_id; 2400 - 2401 - if (state == SCI_BASE_CONTROLLER_STATE_RESET || 2402 - state == SCI_BASE_CONTROLLER_STATE_INITIALIZING || 2403 - state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) { 2404 - 2405 - if (scic_oem_parameters_validate(&scic_parms->sds1)) 2406 - return SCI_FAILURE_INVALID_PARAMETER_VALUE; 2407 - scic->oem_parameters.sds1 = scic_parms->sds1; 2408 - 2409 - return SCI_SUCCESS; 2410 - } 2411 - 2412 - return SCI_FAILURE_INVALID_STATE; 2413 - } 2414 - 2415 - void scic_oem_parameters_get( 2416 - struct scic_sds_controller *scic, 2417 - union scic_oem_parameters *scic_parms) 2418 - { 2419 - memcpy(scic_parms, (&scic->oem_parameters), sizeof(*scic_parms)); 2420 - } 2421 - 2422 - #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853 2423 - #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280 2424 - #define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000 2425 - #define INTERRUPT_COALESCE_NUMBER_MAX 256 2426 - #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7 2427 - #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28 2428 - 2429 - /** 2430 - * scic_controller_set_interrupt_coalescence() - This method allows the user to 2431 - * configure the interrupt coalescence. 2432 - * @controller: This parameter represents the handle to the controller object 2433 - * for which its interrupt coalesce register is overridden. 
2434 - * @coalesce_number: Used to control the number of entries in the Completion 2435 - * Queue before an interrupt is generated. If the number of entries exceed 2436 - * this number, an interrupt will be generated. The valid range of the input 2437 - * is [0, 256]. A setting of 0 results in coalescing being disabled. 2438 - * @coalesce_timeout: Timeout value in microseconds. The valid range of the 2439 - * input is [0, 2700000] . A setting of 0 is allowed and results in no 2440 - * interrupt coalescing timeout. 2441 - * 2442 - * Indicate if the user successfully set the interrupt coalesce parameters. 2443 - * SCI_SUCCESS The user successfully updated the interrutp coalescence. 2444 - * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range. 2445 - */ 2446 - static enum sci_status scic_controller_set_interrupt_coalescence( 2447 - struct scic_sds_controller *scic_controller, 2448 - u32 coalesce_number, 2449 - u32 coalesce_timeout) 2450 - { 2451 - u8 timeout_encode = 0; 2452 - u32 min = 0; 2453 - u32 max = 0; 2454 - 2455 - /* Check if the input parameters fall in the range. 
*/ 2456 - if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX) 2457 - return SCI_FAILURE_INVALID_PARAMETER_VALUE; 2458 - 2459 - /* 2460 - * Defined encoding for interrupt coalescing timeout: 2461 - * Value Min Max Units 2462 - * ----- --- --- ----- 2463 - * 0 - - Disabled 2464 - * 1 13.3 20.0 ns 2465 - * 2 26.7 40.0 2466 - * 3 53.3 80.0 2467 - * 4 106.7 160.0 2468 - * 5 213.3 320.0 2469 - * 6 426.7 640.0 2470 - * 7 853.3 1280.0 2471 - * 8 1.7 2.6 us 2472 - * 9 3.4 5.1 2473 - * 10 6.8 10.2 2474 - * 11 13.7 20.5 2475 - * 12 27.3 41.0 2476 - * 13 54.6 81.9 2477 - * 14 109.2 163.8 2478 - * 15 218.5 327.7 2479 - * 16 436.9 655.4 2480 - * 17 873.8 1310.7 2481 - * 18 1.7 2.6 ms 2482 - * 19 3.5 5.2 2483 - * 20 7.0 10.5 2484 - * 21 14.0 21.0 2485 - * 22 28.0 41.9 2486 - * 23 55.9 83.9 2487 - * 24 111.8 167.8 2488 - * 25 223.7 335.5 2489 - * 26 447.4 671.1 2490 - * 27 894.8 1342.2 2491 - * 28 1.8 2.7 s 2492 - * Others Undefined */ 2493 - 2494 - /* 2495 - * Use the table above to decide the encode of interrupt coalescing timeout 2496 - * value for register writing. */ 2497 - if (coalesce_timeout == 0) 2498 - timeout_encode = 0; 2499 - else{ 2500 - /* make the timeout value in unit of (10 ns). */ 2501 - coalesce_timeout = coalesce_timeout * 100; 2502 - min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10; 2503 - max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10; 2504 - 2505 - /* get the encode of timeout for register writing. 
*/ 2506 - for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN; 2507 - timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX; 2508 - timeout_encode++) { 2509 - if (min <= coalesce_timeout && max > coalesce_timeout) 2510 - break; 2511 - else if (coalesce_timeout >= max && coalesce_timeout < min * 2 2512 - && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) { 2513 - if ((coalesce_timeout - max) < (2 * min - coalesce_timeout)) 2514 - break; 2515 - else{ 2516 - timeout_encode++; 2517 - break; 2518 - } 2519 - } else { 2520 - max = max * 2; 2521 - min = min * 2; 2522 - } 2523 - } 2524 - 2525 - if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1) 2526 - /* the value is out of range. */ 2527 - return SCI_FAILURE_INVALID_PARAMETER_VALUE; 2528 - } 2529 - 2530 - writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) | 2531 - SMU_ICC_GEN_VAL(TIMER, timeout_encode), 2532 - &scic_controller->smu_registers->interrupt_coalesce_control); 2533 - 2534 - 2535 - scic_controller->interrupt_coalesce_number = (u16)coalesce_number; 2536 - scic_controller->interrupt_coalesce_timeout = coalesce_timeout / 100; 2537 - 2538 - return SCI_SUCCESS; 2539 - } 2540 - 2541 - 2542 - 2543 - enum sci_status scic_controller_initialize(struct scic_sds_controller *scic) 2544 - { 2545 - struct sci_base_state_machine *sm = &scic->state_machine; 2546 - enum sci_status result = SCI_SUCCESS; 2547 - struct isci_host *ihost = scic_to_ihost(scic); 2548 - u32 index, state; 2549 - 2550 - if (scic->state_machine.current_state_id != 2551 - SCI_BASE_CONTROLLER_STATE_RESET) { 2552 - dev_warn(scic_to_dev(scic), 2553 - "SCIC Controller initialize operation requested " 2554 - "in invalid state\n"); 2555 - return SCI_FAILURE_INVALID_STATE; 2556 - } 2557 - 2558 - sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_INITIALIZING); 2559 - 2560 - scic->timeout_timer = isci_timer_create(ihost, 2561 - scic, 2562 - scic_sds_controller_timeout_handler); 2563 - 2564 - 
scic_sds_controller_initialize_phy_startup(scic); 2565 - 2566 - scic_sds_controller_initialize_power_control(scic); 2567 - 2568 - /* 2569 - * There is nothing to do here for B0 since we do not have to 2570 - * program the AFE registers. 2571 - * / @todo The AFE settings are supposed to be correct for the B0 but 2572 - * / presently they seem to be wrong. */ 2573 - scic_sds_controller_afe_initialization(scic); 2574 - 2575 - if (result == SCI_SUCCESS) { 2576 - u32 status; 2577 - u32 terminate_loop; 2578 - 2579 - /* Take the hardware out of reset */ 2580 - writel(0, &scic->smu_registers->soft_reset_control); 2581 - 2582 - /* 2583 - * / @todo Provide meaningfull error code for hardware failure 2584 - * result = SCI_FAILURE_CONTROLLER_HARDWARE; */ 2585 - result = SCI_FAILURE; 2586 - terminate_loop = 100; 2587 - 2588 - while (terminate_loop-- && (result != SCI_SUCCESS)) { 2589 - /* Loop until the hardware reports success */ 2590 - udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME); 2591 - status = readl(&scic->smu_registers->control_status); 2592 - 2593 - if ((status & SCU_RAM_INIT_COMPLETED) == 2594 - SCU_RAM_INIT_COMPLETED) 2595 - result = SCI_SUCCESS; 2596 - } 2597 - } 2598 - 2599 - if (result == SCI_SUCCESS) { 2600 - u32 max_supported_ports; 2601 - u32 max_supported_devices; 2602 - u32 max_supported_io_requests; 2603 - u32 device_context_capacity; 2604 - 2605 - /* 2606 - * Determine what are the actaul device capacities that the 2607 - * hardware will support */ 2608 - device_context_capacity = 2609 - readl(&scic->smu_registers->device_context_capacity); 2610 - 2611 - 2612 - max_supported_ports = smu_dcc_get_max_ports(device_context_capacity); 2613 - max_supported_devices = smu_dcc_get_max_remote_node_context(device_context_capacity); 2614 - max_supported_io_requests = smu_dcc_get_max_task_context(device_context_capacity); 2615 - 2616 - /* 2617 - * Make all PEs that are unassigned match up with the 2618 - * logical ports 2619 - */ 2620 - for (index = 0; index < 
max_supported_ports; index++) { 2621 - struct scu_port_task_scheduler_group_registers __iomem 2622 - *ptsg = &scic->scu_registers->peg0.ptsg; 2623 - 2624 - writel(index, &ptsg->protocol_engine[index]); 2625 - } 2626 - 2627 - /* Record the smaller of the two capacity values */ 2628 - scic->logical_port_entries = 2629 - min(max_supported_ports, scic->logical_port_entries); 2630 - 2631 - scic->task_context_entries = 2632 - min(max_supported_io_requests, 2633 - scic->task_context_entries); 2634 - 2635 - scic->remote_node_entries = 2636 - min(max_supported_devices, scic->remote_node_entries); 2637 - 2638 - /* 2639 - * Now that we have the correct hardware reported minimum values 2640 - * build the MDL for the controller. Default to a performance 2641 - * configuration. 2642 - */ 2643 - scic_controller_set_mode(scic, SCI_MODE_SPEED); 2644 - } 2645 - 2646 - /* Initialize hardware PCI Relaxed ordering in DMA engines */ 2647 - if (result == SCI_SUCCESS) { 2648 - u32 dma_configuration; 2649 - 2650 - /* Configure the payload DMA */ 2651 - dma_configuration = 2652 - readl(&scic->scu_registers->sdma.pdma_configuration); 2653 - dma_configuration |= 2654 - SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); 2655 - writel(dma_configuration, 2656 - &scic->scu_registers->sdma.pdma_configuration); 2657 - 2658 - /* Configure the control DMA */ 2659 - dma_configuration = 2660 - readl(&scic->scu_registers->sdma.cdma_configuration); 2661 - dma_configuration |= 2662 - SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); 2663 - writel(dma_configuration, 2664 - &scic->scu_registers->sdma.cdma_configuration); 2665 - } 2666 - 2667 - /* 2668 - * Initialize the PHYs before the PORTs because the PHY registers 2669 - * are accessed during the port initialization. 
2670 - */ 2671 - if (result == SCI_SUCCESS) { 2672 - /* Initialize the phys */ 2673 - for (index = 0; 2674 - (result == SCI_SUCCESS) && (index < SCI_MAX_PHYS); 2675 - index++) { 2676 - result = scic_sds_phy_initialize( 2677 - &ihost->phys[index].sci, 2678 - &scic->scu_registers->peg0.pe[index].tl, 2679 - &scic->scu_registers->peg0.pe[index].ll); 2680 - } 2681 - } 2682 - 2683 - if (result == SCI_SUCCESS) { 2684 - /* Initialize the logical ports */ 2685 - for (index = 0; 2686 - (index < scic->logical_port_entries) && 2687 - (result == SCI_SUCCESS); 2688 - index++) { 2689 - result = scic_sds_port_initialize( 2690 - &ihost->ports[index].sci, 2691 - &scic->scu_registers->peg0.ptsg.port[index], 2692 - &scic->scu_registers->peg0.ptsg.protocol_engine, 2693 - &scic->scu_registers->peg0.viit[index]); 2694 - } 2695 - } 2696 - 2697 - if (result == SCI_SUCCESS) 2698 - result = scic_sds_port_configuration_agent_initialize( 2699 - scic, 2700 - &scic->port_agent); 2701 - 2702 - /* Advance the controller state machine */ 2703 - if (result == SCI_SUCCESS) 2704 - state = SCI_BASE_CONTROLLER_STATE_INITIALIZED; 2705 - else 2706 - state = SCI_BASE_CONTROLLER_STATE_FAILED; 2707 - sci_base_state_machine_change_state(sm, state); 2708 - 2709 - return result; 2710 - } 2711 - 2712 - enum sci_status scic_controller_start(struct scic_sds_controller *scic, 2713 - u32 timeout) 2714 - { 2715 - struct isci_host *ihost = scic_to_ihost(scic); 2716 - enum sci_status result; 2717 - u16 index; 2718 - 2719 - if (scic->state_machine.current_state_id != 2720 - SCI_BASE_CONTROLLER_STATE_INITIALIZED) { 2721 - dev_warn(scic_to_dev(scic), 2722 - "SCIC Controller start operation requested in " 2723 - "invalid state\n"); 2724 - return SCI_FAILURE_INVALID_STATE; 2725 - } 2726 - 2727 - /* Build the TCi free pool */ 2728 - sci_pool_initialize(scic->tci_pool); 2729 - for (index = 0; index < scic->task_context_entries; index++) 2730 - sci_pool_put(scic->tci_pool, index); 2731 - 2732 - /* Build the RNi free pool */ 
2733 - scic_sds_remote_node_table_initialize( 2734 - &scic->available_remote_nodes, 2735 - scic->remote_node_entries); 2736 - 2737 - /* 2738 - * Before anything else lets make sure we will not be 2739 - * interrupted by the hardware. 2740 - */ 2741 - scic_controller_disable_interrupts(scic); 2742 - 2743 - /* Enable the port task scheduler */ 2744 - scic_sds_controller_enable_port_task_scheduler(scic); 2745 - 2746 - /* Assign all the task entries to scic physical function */ 2747 - scic_sds_controller_assign_task_entries(scic); 2748 - 2749 - /* Now initialize the completion queue */ 2750 - scic_sds_controller_initialize_completion_queue(scic); 2751 - 2752 - /* Initialize the unsolicited frame queue for use */ 2753 - scic_sds_controller_initialize_unsolicited_frame_queue(scic); 2754 - 2755 - /* Start all of the ports on this controller */ 2756 - for (index = 0; index < scic->logical_port_entries; index++) { 2757 - struct scic_sds_port *sci_port = &ihost->ports[index].sci; 2758 - 2759 - result = sci_port->state_handlers->start_handler(sci_port); 2760 - if (result) 2761 - return result; 2762 - } 2763 - 2764 - scic_sds_controller_start_next_phy(scic); 2765 - 2766 - isci_timer_start(scic->timeout_timer, timeout); 2767 - 2768 - sci_base_state_machine_change_state(&scic->state_machine, 2769 - SCI_BASE_CONTROLLER_STATE_STARTING); 2770 - 2771 - return SCI_SUCCESS; 2772 - } 2773 - 2774 - /** 2775 - * 2776 - * @object: This is the object which is cast to a struct scic_sds_controller 2777 - * object. 2778 - * 2779 - * This method implements the actions taken by the struct scic_sds_controller on entry 2780 - * to the SCI_BASE_CONTROLLER_STATE_INITIAL. - Set the state handlers to the 2781 - * controllers initial state. none This function should initialize the 2782 - * controller object. 
2783 - */ 2784 - static void scic_sds_controller_initial_state_enter(void *object) 2785 - { 2786 - struct scic_sds_controller *scic = object; 2787 - 2788 - sci_base_state_machine_change_state(&scic->state_machine, 2789 - SCI_BASE_CONTROLLER_STATE_RESET); 2790 - } 2791 - 2792 - /** 2793 - * 2794 - * @object: This is the object which is cast to a struct scic_sds_controller 2795 - * object. 2796 - * 2797 - * This method implements the actions taken by the struct scic_sds_controller on exit 2798 - * from the SCI_BASE_CONTROLLER_STATE_STARTING. - This function stops the 2799 - * controller starting timeout timer. none 2800 - */ 2801 - static inline void scic_sds_controller_starting_state_exit(void *object) 2802 - { 2803 - struct scic_sds_controller *scic = object; 2804 - 2805 - isci_timer_stop(scic->timeout_timer); 2806 - } 2807 - 2808 - /** 2809 - * 2810 - * @object: This is the object which is cast to a struct scic_sds_controller 2811 - * object. 2812 - * 2813 - * This method implements the actions taken by the struct scic_sds_controller on entry 2814 - * to the SCI_BASE_CONTROLLER_STATE_READY. - Set the state handlers to the 2815 - * controllers ready state. none 2816 - */ 2817 - static void scic_sds_controller_ready_state_enter(void *object) 2818 - { 2819 - struct scic_sds_controller *scic = object; 2820 - 2821 - /* set the default interrupt coalescence number and timeout value. */ 2822 - scic_controller_set_interrupt_coalescence( 2823 - scic, 0x10, 250); 2824 - } 2825 - 2826 - /** 2827 - * 2828 - * @object: This is the object which is cast to a struct scic_sds_controller 2829 - * object. 2830 - * 2831 - * This method implements the actions taken by the struct scic_sds_controller on exit 2832 - * from the SCI_BASE_CONTROLLER_STATE_READY. - This function does nothing. none 2833 - */ 2834 - static void scic_sds_controller_ready_state_exit(void *object) 2835 - { 2836 - struct scic_sds_controller *scic = object; 2837 - 2838 - /* disable interrupt coalescence. 
*/ 2839 - scic_controller_set_interrupt_coalescence(scic, 0, 0); 2840 - } 2841 - 2842 - /** 2843 - * 2844 - * @object: This is the object which is cast to a struct scic_sds_controller 2845 - * object. 2846 - * 2847 - * This method implements the actions taken by the struct scic_sds_controller on entry 2848 - * to the SCI_BASE_CONTROLLER_STATE_READY. - Set the state handlers to the 2849 - * controllers ready state. - Stop the phys on this controller - Stop the ports 2850 - * on this controller - Stop all of the remote devices on this controller none 2851 - */ 2852 - static void scic_sds_controller_stopping_state_enter(void *object) 2853 - { 2854 - struct scic_sds_controller *scic = object; 2855 - 2856 - /* Stop all of the components for this controller */ 2857 - scic_sds_controller_stop_phys(scic); 2858 - scic_sds_controller_stop_ports(scic); 2859 - scic_sds_controller_stop_devices(scic); 2860 - } 2861 - 2862 - /** 2863 - * 2864 - * @object: This is the object which is cast to a struct 2865 - * scic_sds_controller object. 2866 - * 2867 - * This function implements the actions taken by the struct scic_sds_controller 2868 - * on exit from the SCI_BASE_CONTROLLER_STATE_STOPPING. - 2869 - * This function stops the controller stopping timeout timer. 
2870 - */ 2871 - static inline void scic_sds_controller_stopping_state_exit(void *object) 2872 - { 2873 - struct scic_sds_controller *scic = object; 2874 - 2875 - isci_timer_stop(scic->timeout_timer); 2876 - } 2877 - 2878 - static void scic_sds_controller_resetting_state_enter(void *object) 2879 - { 2880 - struct scic_sds_controller *scic = object; 2881 - 2882 - scic_sds_controller_reset_hardware(scic); 2883 - sci_base_state_machine_change_state(&scic->state_machine, 2884 - SCI_BASE_CONTROLLER_STATE_RESET); 2885 - } 2886 - 2887 - static const struct sci_base_state scic_sds_controller_state_table[] = { 2888 - [SCI_BASE_CONTROLLER_STATE_INITIAL] = { 2889 - .enter_state = scic_sds_controller_initial_state_enter, 2890 - }, 2891 - [SCI_BASE_CONTROLLER_STATE_RESET] = {}, 2892 - [SCI_BASE_CONTROLLER_STATE_INITIALIZING] = {}, 2893 - [SCI_BASE_CONTROLLER_STATE_INITIALIZED] = {}, 2894 - [SCI_BASE_CONTROLLER_STATE_STARTING] = { 2895 - .exit_state = scic_sds_controller_starting_state_exit, 2896 - }, 2897 - [SCI_BASE_CONTROLLER_STATE_READY] = { 2898 - .enter_state = scic_sds_controller_ready_state_enter, 2899 - .exit_state = scic_sds_controller_ready_state_exit, 2900 - }, 2901 - [SCI_BASE_CONTROLLER_STATE_RESETTING] = { 2902 - .enter_state = scic_sds_controller_resetting_state_enter, 2903 - }, 2904 - [SCI_BASE_CONTROLLER_STATE_STOPPING] = { 2905 - .enter_state = scic_sds_controller_stopping_state_enter, 2906 - .exit_state = scic_sds_controller_stopping_state_exit, 2907 - }, 2908 - [SCI_BASE_CONTROLLER_STATE_STOPPED] = {}, 2909 - [SCI_BASE_CONTROLLER_STATE_FAILED] = {} 2910 - }; 2911 - 2912 - /** 2913 - * scic_controller_construct() - This method will attempt to construct a 2914 - * controller object utilizing the supplied parameter information. 2915 - * @c: This parameter specifies the controller to be constructed. 
2916 - * @scu_base: mapped base address of the scu registers 2917 - * @smu_base: mapped base address of the smu registers 2918 - * 2919 - * Indicate if the controller was successfully constructed or if it failed in 2920 - * some way. SCI_SUCCESS This value is returned if the controller was 2921 - * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned 2922 - * if the interrupt coalescence timer may cause SAS compliance issues for SMP 2923 - * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE 2924 - * This value is returned if the controller does not support the supplied type. 2925 - * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the 2926 - * controller does not support the supplied initialization data version. 2927 - */ 2928 - enum sci_status scic_controller_construct(struct scic_sds_controller *scic, 2929 - void __iomem *scu_base, 2930 - void __iomem *smu_base) 2931 - { 2932 - struct isci_host *ihost = scic_to_ihost(scic); 2933 - u8 i; 2934 - 2935 - sci_base_state_machine_construct(&scic->state_machine, 2936 - scic, scic_sds_controller_state_table, 2937 - SCI_BASE_CONTROLLER_STATE_INITIAL); 2938 - 2939 - sci_base_state_machine_start(&scic->state_machine); 2940 - 2941 - scic->scu_registers = scu_base; 2942 - scic->smu_registers = smu_base; 2943 - 2944 - scic_sds_port_configuration_agent_construct(&scic->port_agent); 2945 - 2946 - /* Construct the ports for this controller */ 2947 - for (i = 0; i < SCI_MAX_PORTS; i++) 2948 - scic_sds_port_construct(&ihost->ports[i].sci, i, scic); 2949 - scic_sds_port_construct(&ihost->ports[i].sci, SCIC_SDS_DUMMY_PORT, scic); 2950 - 2951 - /* Construct the phys for this controller */ 2952 - for (i = 0; i < SCI_MAX_PHYS; i++) { 2953 - /* Add all the PHYs to the dummy port */ 2954 - scic_sds_phy_construct(&ihost->phys[i].sci, 2955 - &ihost->ports[SCI_MAX_PORTS].sci, i); 2956 - } 2957 - 2958 - scic->invalid_phy_mask = 0; 2959 - 2960 - /* Set the default maximum values 
*/ 2961 - scic->completion_event_entries = SCU_EVENT_COUNT; 2962 - scic->completion_queue_entries = SCU_COMPLETION_QUEUE_COUNT; 2963 - scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES; 2964 - scic->logical_port_entries = SCI_MAX_PORTS; 2965 - scic->task_context_entries = SCU_IO_REQUEST_COUNT; 2966 - scic->uf_control.buffers.count = SCU_UNSOLICITED_FRAME_COUNT; 2967 - scic->uf_control.address_table.count = SCU_UNSOLICITED_FRAME_COUNT; 2968 - 2969 - /* Initialize the User and OEM parameters to default values. */ 2970 - scic_sds_controller_set_default_config_parameters(scic); 2971 - 2972 - return scic_controller_reset(scic); 2973 - }
-576
drivers/scsi/isci/core/scic_sds_controller.h
··· 1 - /* 2 - * This file is provided under a dual BSD/GPLv2 license. When using or 3 - * redistributing this file, you may do so under either license. 4 - * 5 - * GPL LICENSE SUMMARY 6 - * 7 - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 8 - * 9 - * This program is free software; you can redistribute it and/or modify 10 - * it under the terms of version 2 of the GNU General Public License as 11 - * published by the Free Software Foundation. 12 - * 13 - * This program is distributed in the hope that it will be useful, but 14 - * WITHOUT ANY WARRANTY; without even the implied warranty of 15 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 - * General Public License for more details. 17 - * 18 - * You should have received a copy of the GNU General Public License 19 - * along with this program; if not, write to the Free Software 20 - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 21 - * The full GNU General Public License is included in this distribution 22 - * in the file called LICENSE.GPL. 23 - * 24 - * BSD LICENSE 25 - * 26 - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 27 - * All rights reserved. 28 - * 29 - * Redistribution and use in source and binary forms, with or without 30 - * modification, are permitted provided that the following conditions 31 - * are met: 32 - * 33 - * * Redistributions of source code must retain the above copyright 34 - * notice, this list of conditions and the following disclaimer. 35 - * * Redistributions in binary form must reproduce the above copyright 36 - * notice, this list of conditions and the following disclaimer in 37 - * the documentation and/or other materials provided with the 38 - * distribution. 39 - * * Neither the name of Intel Corporation nor the names of its 40 - * contributors may be used to endorse or promote products derived 41 - * from this software without specific prior written permission. 
42 - * 43 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 44 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 45 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 46 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 47 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 48 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 49 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 50 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 51 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 52 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 53 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 - */ 55 - 56 - #ifndef _SCIC_SDS_CONTROLLER_H_ 57 - #define _SCIC_SDS_CONTROLLER_H_ 58 - 59 - #include <linux/string.h> 60 - #include <linux/io.h> 61 - 62 - /** 63 - * This file contains the structures, constants and prototypes used for the 64 - * core controller object. 65 - * 66 - * 67 - */ 68 - 69 - #include "sci_pool.h" 70 - #include "sci_base_state.h" 71 - #include "sci_base_state_machine.h" 72 - #include "scic_config_parameters.h" 73 - #include "scic_sds_port.h" 74 - #include "scic_sds_phy.h" 75 - #include "remote_node_table.h" 76 - #include "remote_device.h" 77 - #include "scu_registers.h" 78 - #include "scu_task_context.h" 79 - #include "scu_unsolicited_frame.h" 80 - #include "scic_sds_unsolicited_frame_control.h" 81 - #include "scic_sds_port_configuration_agent.h" 82 - 83 - struct sci_base_remote_device; 84 - struct scic_sds_remote_device; 85 - struct scic_sds_request; 86 - struct scic_sds_controller; 87 - 88 - /** 89 - * struct scic_power_control - 90 - * 91 - * This structure defines the fields for managing power control for direct 92 - * attached disk devices. 
93 - */ 94 - struct scic_power_control { 95 - /** 96 - * This field is set when the power control timer is running and cleared when 97 - * it is not. 98 - */ 99 - bool timer_started; 100 - 101 - /** 102 - * This field is the handle to the driver timer object. This timer is used to 103 - * control when the directed attached disks can consume power. 104 - */ 105 - void *timer; 106 - 107 - /** 108 - * This field is used to keep track of how many phys are put into the 109 - * requesters field. 110 - */ 111 - u8 phys_waiting; 112 - 113 - /** 114 - * This field is used to keep track of how many phys have been granted to consume power 115 - */ 116 - u8 phys_granted_power; 117 - 118 - /** 119 - * This field is an array of phys that we are waiting on. The phys are direct 120 - * mapped into requesters via struct scic_sds_phy.phy_index 121 - */ 122 - struct scic_sds_phy *requesters[SCI_MAX_PHYS]; 123 - 124 - }; 125 - 126 - /** 127 - * struct scic_sds_controller - 128 - * 129 - * This structure represents the SCU controller object. 130 - */ 131 - struct scic_sds_controller { 132 - /** 133 - * This field contains the information for the base controller state 134 - * machine. 135 - */ 136 - struct sci_base_state_machine state_machine; 137 - 138 - /** 139 - * This field is the driver timer object handler used to time the controller 140 - * object start and stop requests. 141 - */ 142 - void *timeout_timer; 143 - 144 - /** 145 - * This field contains the user parameters to be utilized for this 146 - * core controller object. 147 - */ 148 - union scic_user_parameters user_parameters; 149 - 150 - /** 151 - * This field contains the OEM parameters to be utilized for this 152 - * core controller object. 153 - */ 154 - union scic_oem_parameters oem_parameters; 155 - 156 - /** 157 - * This field contains the port configuration agent for this controller. 
158 - */ 159 - struct scic_sds_port_configuration_agent port_agent; 160 - 161 - /** 162 - * This field is the array of device objects that are currently constructed 163 - * for this controller object. This table is used as a fast lookup of device 164 - * objects that need to handle device completion notifications from the 165 - * hardware. The table is RNi based. 166 - */ 167 - struct scic_sds_remote_device *device_table[SCI_MAX_REMOTE_DEVICES]; 168 - 169 - /** 170 - * This field is the array of IO request objects that are currently active for 171 - * this controller object. This table is used as a fast lookup of the io 172 - * request object that need to handle completion queue notifications. The 173 - * table is TCi based. 174 - */ 175 - struct scic_sds_request *io_request_table[SCI_MAX_IO_REQUESTS]; 176 - 177 - /** 178 - * This field is the free RNi data structure 179 - */ 180 - struct scic_remote_node_table available_remote_nodes; 181 - 182 - /** 183 - * This field is the TCi pool used to manage the task context index. 184 - */ 185 - SCI_POOL_CREATE(tci_pool, u16, SCI_MAX_IO_REQUESTS); 186 - 187 - /** 188 - * This filed is the struct scic_power_control data used to controll when direct 189 - * attached devices can consume power. 190 - */ 191 - struct scic_power_control power_control; 192 - 193 - /** 194 - * This field is the array of sequence values for the IO Tag fields. Even 195 - * though only 4 bits of the field is used for the sequence the sequence is 16 196 - * bits in size so the sequence can be bitwise or'd with the TCi to build the 197 - * IO Tag value. 198 - */ 199 - u16 io_request_sequence[SCI_MAX_IO_REQUESTS]; 200 - 201 - /** 202 - * This field in the array of sequence values for the RNi. These are used 203 - * to control io request build to io request start operations. 
The sequence 204 - * value is recorded into an io request when it is built and is checked on 205 - * the io request start operation to make sure that there was not a device 206 - * hot plug between the build and start operation. 207 - */ 208 - u8 remote_device_sequence[SCI_MAX_REMOTE_DEVICES]; 209 - 210 - /** 211 - * This field is a pointer to the memory allocated by the driver for the task 212 - * context table. This data is shared between the hardware and software. 213 - */ 214 - struct scu_task_context *task_context_table; 215 - 216 - /** 217 - * This field is a pointer to the memory allocated by the driver for the 218 - * remote node context table. This table is shared between the hardware and 219 - * software. 220 - */ 221 - union scu_remote_node_context *remote_node_context_table; 222 - 223 - /** 224 - * This field is a pointer to the completion queue. This memory is 225 - * written to by the hardware and read by the software. 226 - */ 227 - u32 *completion_queue; 228 - 229 - /** 230 - * This field is the software copy of the completion queue get pointer. The 231 - * controller object writes this value to the hardware after processing the 232 - * completion entries. 233 - */ 234 - u32 completion_queue_get; 235 - 236 - /** 237 - * This field is the minimum of the number of hardware supported port entries 238 - * and the software requested port entries. 239 - */ 240 - u32 logical_port_entries; 241 - 242 - /** 243 - * This field is the minimum number of hardware supported completion queue 244 - * entries and the software requested completion queue entries. 245 - */ 246 - u32 completion_queue_entries; 247 - 248 - /** 249 - * This field is the minimum number of hardware supported event entries and 250 - * the software requested event entries. 251 - */ 252 - u32 completion_event_entries; 253 - 254 - /** 255 - * This field is the minimum number of devices supported by the hardware and 256 - * the number of devices requested by the software. 
257 - */ 258 - u32 remote_node_entries; 259 - 260 - /** 261 - * This field is the minimum number of IO requests supported by the hardware 262 - * and the number of IO requests requested by the software. 263 - */ 264 - u32 task_context_entries; 265 - 266 - /** 267 - * This object contains all of the unsolicited frame specific 268 - * data utilized by the core controller. 269 - */ 270 - struct scic_sds_unsolicited_frame_control uf_control; 271 - 272 - /* Phy Startup Data */ 273 - /** 274 - * This field is the driver timer handle for controller phy request startup. 275 - * On controller start the controller will start each PHY individually in 276 - * order of phy index. 277 - */ 278 - void *phy_startup_timer; 279 - 280 - /** 281 - * This field is set when the phy_startup_timer is running and is cleared when 282 - * the phy_startup_timer is stopped. 283 - */ 284 - bool phy_startup_timer_pending; 285 - 286 - /** 287 - * This field is the index of the next phy start. It is initialized to 0 and 288 - * increments for each phy index that is started. 289 - */ 290 - u32 next_phy_to_start; 291 - 292 - /** 293 - * This field controlls the invalid link up notifications to the SCI_USER. If 294 - * an invalid_link_up notification is reported a bit for the PHY index is set 295 - * so further notifications are not made. Once the PHY object reports link up 296 - * and is made part of a port then this bit for the PHY index is cleared. 297 - */ 298 - u8 invalid_phy_mask; 299 - 300 - /* 301 - * This field saves the current interrupt coalescing number of the controller. 302 - */ 303 - u16 interrupt_coalesce_number; 304 - 305 - /* 306 - * This field saves the current interrupt coalescing timeout value in microseconds. 307 - */ 308 - u32 interrupt_coalesce_timeout; 309 - 310 - /** 311 - * This field is a pointer to the memory mapped register space for the 312 - * struct smu_registers. 
313 - */ 314 - struct smu_registers __iomem *smu_registers; 315 - 316 - /** 317 - * This field is a pointer to the memory mapped register space for the 318 - * struct scu_registers. 319 - */ 320 - struct scu_registers __iomem *scu_registers; 321 - 322 - }; 323 - 324 - /** 325 - * enum scic_sds_controller_states - This enumeration depicts all the states 326 - * for the common controller state machine. 327 - */ 328 - enum scic_sds_controller_states { 329 - /** 330 - * Simply the initial state for the base controller state machine. 331 - */ 332 - SCI_BASE_CONTROLLER_STATE_INITIAL = 0, 333 - 334 - /** 335 - * This state indicates that the controller is reset. The memory for 336 - * the controller is in it's initial state, but the controller requires 337 - * initialization. 338 - * This state is entered from the INITIAL state. 339 - * This state is entered from the RESETTING state. 340 - */ 341 - SCI_BASE_CONTROLLER_STATE_RESET, 342 - 343 - /** 344 - * This state is typically an action state that indicates the controller 345 - * is in the process of initialization. In this state no new IO operations 346 - * are permitted. 347 - * This state is entered from the RESET state. 348 - */ 349 - SCI_BASE_CONTROLLER_STATE_INITIALIZING, 350 - 351 - /** 352 - * This state indicates that the controller has been successfully 353 - * initialized. In this state no new IO operations are permitted. 354 - * This state is entered from the INITIALIZING state. 355 - */ 356 - SCI_BASE_CONTROLLER_STATE_INITIALIZED, 357 - 358 - /** 359 - * This state indicates the the controller is in the process of becoming 360 - * ready (i.e. starting). In this state no new IO operations are permitted. 361 - * This state is entered from the INITIALIZED state. 362 - */ 363 - SCI_BASE_CONTROLLER_STATE_STARTING, 364 - 365 - /** 366 - * This state indicates the controller is now ready. Thus, the user 367 - * is able to perform IO operations on the controller. 
368 - * This state is entered from the STARTING state. 369 - */ 370 - SCI_BASE_CONTROLLER_STATE_READY, 371 - 372 - /** 373 - * This state is typically an action state that indicates the controller 374 - * is in the process of resetting. Thus, the user is unable to perform 375 - * IO operations on the controller. A reset is considered destructive in 376 - * most cases. 377 - * This state is entered from the READY state. 378 - * This state is entered from the FAILED state. 379 - * This state is entered from the STOPPED state. 380 - */ 381 - SCI_BASE_CONTROLLER_STATE_RESETTING, 382 - 383 - /** 384 - * This state indicates that the controller is in the process of stopping. 385 - * In this state no new IO operations are permitted, but existing IO 386 - * operations are allowed to complete. 387 - * This state is entered from the READY state. 388 - */ 389 - SCI_BASE_CONTROLLER_STATE_STOPPING, 390 - 391 - /** 392 - * This state indicates that the controller has successfully been stopped. 393 - * In this state no new IO operations are permitted. 394 - * This state is entered from the STOPPING state. 395 - */ 396 - SCI_BASE_CONTROLLER_STATE_STOPPED, 397 - 398 - /** 399 - * This state indicates that the controller could not successfully be 400 - * initialized. In this state no new IO operations are permitted. 401 - * This state is entered from the INITIALIZING state. 402 - * This state is entered from the STARTING state. 403 - * This state is entered from the STOPPING state. 404 - * This state is entered from the RESETTING state. 405 - */ 406 - SCI_BASE_CONTROLLER_STATE_FAILED, 407 - 408 - SCI_BASE_CONTROLLER_MAX_STATES 409 - 410 - }; 411 - 412 - /** 413 - * INCREMENT_QUEUE_GET() - 414 - * 415 - * This macro will increment the specified index to and if the index wraps to 0 416 - * it will toggel the cycle bit. 
417 - */ 418 - #define INCREMENT_QUEUE_GET(index, cycle, entry_count, bit_toggle) \ 419 - { \ 420 - if ((index) + 1 == entry_count) { \ 421 - (index) = 0; \ 422 - (cycle) = (cycle) ^ (bit_toggle); \ 423 - } else { \ 424 - index = index + 1; \ 425 - } \ 426 - } 427 - 428 - /** 429 - * scic_sds_controller_get_port_configuration_agent() - 430 - * 431 - * This is a helper macro to get the port configuration agent from the 432 - * controller object. 433 - */ 434 - #define scic_sds_controller_get_port_configuration_agent(controller) \ 435 - (&(controller)->port_agent) 436 - 437 - /** 438 - * scic_sds_controller_get_protocol_engine_group() - 439 - * 440 - * This macro returns the protocol engine group for this controller object. 441 - * Presently we only support protocol engine group 0 so just return that 442 - */ 443 - #define scic_sds_controller_get_protocol_engine_group(controller) 0 444 - 445 - /** 446 - * scic_sds_io_tag_construct() - 447 - * 448 - * This macro constructs an IO tag from the sequence and index values. 449 - */ 450 - #define scic_sds_io_tag_construct(sequence, task_index) \ 451 - ((sequence) << 12 | (task_index)) 452 - 453 - /** 454 - * scic_sds_io_tag_get_sequence() - 455 - * 456 - * This macro returns the IO sequence from the IO tag value. 457 - */ 458 - #define scic_sds_io_tag_get_sequence(io_tag) \ 459 - (((io_tag) & 0xF000) >> 12) 460 - 461 - /** 462 - * scic_sds_io_tag_get_index() - 463 - * 464 - * This macro returns the TCi from the io tag value 465 - */ 466 - #define scic_sds_io_tag_get_index(io_tag) \ 467 - ((io_tag) & 0x0FFF) 468 - 469 - /** 470 - * scic_sds_io_sequence_increment() - 471 - * 472 - * This is a helper macro to increment the io sequence count. 
We may find in 473 - * the future that it will be faster to store the sequence count in such a way 474 - * as we dont perform the shift operation to build io tag values so therefore 475 - * need a way to incrment them correctly 476 - */ 477 - #define scic_sds_io_sequence_increment(value) \ 478 - ((value) = (((value) + 1) & 0x000F)) 479 - 480 - /* expander attached sata devices require 3 rnc slots */ 481 - static inline int scic_sds_remote_device_node_count(struct scic_sds_remote_device *sci_dev) 482 - { 483 - struct domain_device *dev = sci_dev_to_domain(sci_dev); 484 - 485 - if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && 486 - !sci_dev->is_direct_attached) 487 - return SCU_STP_REMOTE_NODE_COUNT; 488 - return SCU_SSP_REMOTE_NODE_COUNT; 489 - } 490 - 491 - /** 492 - * scic_sds_controller_set_invalid_phy() - 493 - * 494 - * This macro will set the bit in the invalid phy mask for this controller 495 - * object. This is used to control messages reported for invalid link up 496 - * notifications. 497 - */ 498 - #define scic_sds_controller_set_invalid_phy(controller, phy) \ 499 - ((controller)->invalid_phy_mask |= (1 << (phy)->phy_index)) 500 - 501 - /** 502 - * scic_sds_controller_clear_invalid_phy() - 503 - * 504 - * This macro will clear the bit in the invalid phy mask for this controller 505 - * object. This is used to control messages reported for invalid link up 506 - * notifications. 
507 - */ 508 - #define scic_sds_controller_clear_invalid_phy(controller, phy) \ 509 - ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index)) 510 - 511 - void scic_sds_controller_post_request( 512 - struct scic_sds_controller *this_controller, 513 - u32 request); 514 - 515 - void scic_sds_controller_release_frame( 516 - struct scic_sds_controller *this_controller, 517 - u32 frame_index); 518 - 519 - void scic_sds_controller_copy_sata_response( 520 - void *response_buffer, 521 - void *frame_header, 522 - void *frame_buffer); 523 - 524 - enum sci_status scic_sds_controller_allocate_remote_node_context( 525 - struct scic_sds_controller *this_controller, 526 - struct scic_sds_remote_device *sci_dev, 527 - u16 *node_id); 528 - 529 - void scic_sds_controller_free_remote_node_context( 530 - struct scic_sds_controller *this_controller, 531 - struct scic_sds_remote_device *sci_dev, 532 - u16 node_id); 533 - 534 - union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer( 535 - struct scic_sds_controller *this_controller, 536 - u16 node_id); 537 - 538 - struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, 539 - u16 io_tag); 540 - 541 - struct scu_task_context *scic_sds_controller_get_task_context_buffer( 542 - struct scic_sds_controller *this_controller, 543 - u16 io_tag); 544 - 545 - void scic_sds_controller_power_control_queue_insert( 546 - struct scic_sds_controller *this_controller, 547 - struct scic_sds_phy *sci_phy); 548 - 549 - void scic_sds_controller_power_control_queue_remove( 550 - struct scic_sds_controller *this_controller, 551 - struct scic_sds_phy *sci_phy); 552 - 553 - void scic_sds_controller_link_up( 554 - struct scic_sds_controller *this_controller, 555 - struct scic_sds_port *sci_port, 556 - struct scic_sds_phy *sci_phy); 557 - 558 - void scic_sds_controller_link_down( 559 - struct scic_sds_controller *this_controller, 560 - struct scic_sds_port *sci_port, 561 - struct scic_sds_phy *sci_phy); 562 - 
563 - void scic_sds_controller_remote_device_stopped( 564 - struct scic_sds_controller *this_controller, 565 - struct scic_sds_remote_device *sci_dev); 566 - 567 - void scic_sds_controller_copy_task_context( 568 - struct scic_sds_controller *this_controller, 569 - struct scic_sds_request *this_request); 570 - 571 - void scic_sds_controller_register_setup( 572 - struct scic_sds_controller *this_controller); 573 - 574 - enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req); 575 - 576 - #endif /* _SCIC_SDS_CONTROLLER_H_ */
+1 -2
drivers/scsi/isci/core/scic_sds_phy.c
··· 55 55 56 56 #include <scsi/sas.h> 57 57 #include "sas.h" 58 + #include "host.h" 58 59 #include "sci_base_state.h" 59 60 #include "sci_base_state_machine.h" 60 61 #include "scic_phy.h" 61 - #include "scic_sds_controller.h" 62 62 #include "scic_sds_phy.h" 63 63 #include "scic_sds_port.h" 64 64 #include "remote_node_context.h" 65 - #include "sci_environment.h" 66 65 #include "sci_util.h" 67 66 #include "scu_event_codes.h" 68 67 #include "timers.h"
+1 -3
drivers/scsi/isci/core/scic_sds_port.c
··· 53 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 54 */ 55 55 56 - #include "scic_controller.h" 56 + #include "host.h" 57 57 #include "scic_phy.h" 58 58 #include "scic_port.h" 59 - #include "scic_sds_controller.h" 60 59 #include "scic_sds_phy.h" 61 60 #include "scic_sds_port.h" 62 61 #include "remote_device.h" 63 62 #include "remote_node_context.h" 64 63 #include "scic_sds_request.h" 65 - #include "sci_environment.h" 66 64 #include "scu_registers.h" 67 65 #include "timers.h" 68 66
+1
drivers/scsi/isci/core/scic_sds_port.h
··· 57 57 #define _SCIC_SDS_PORT_H_ 58 58 59 59 #include <linux/kernel.h> 60 + #include "isci.h" 60 61 #include "sas.h" 61 62 #include "scu_registers.h" 62 63 #include "sci_base_state_machine.h"
+1 -3
drivers/scsi/isci/core/scic_sds_port_configuration_agent.c
··· 53 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 54 */ 55 55 56 - #include "sci_environment.h" 57 - #include "scic_controller.h" 58 - #include "scic_sds_controller.h" 56 + #include "host.h" 59 57 #include "scic_sds_port_configuration_agent.h" 60 58 #include "timers.h" 61 59
-3
drivers/scsi/isci/core/scic_sds_request.c
··· 54 54 */ 55 55 56 56 #include <scsi/sas.h> 57 - #include "scic_controller.h" 58 57 #include "scic_io_request.h" 59 - #include "scic_sds_controller.h" 60 58 #include "scu_registers.h" 61 59 #include "scic_sds_port.h" 62 60 #include "remote_device.h" ··· 62 64 #include "scic_sds_smp_request.h" 63 65 #include "scic_sds_stp_request.h" 64 66 #include "scic_sds_unsolicited_frame_control.h" 65 - #include "sci_environment.h" 66 67 #include "sci_util.h" 67 68 #include "scu_completion_codes.h" 68 69 #include "scu_task_context.h"
+1 -3
drivers/scsi/isci/core/scic_sds_smp_request.c
··· 55 55 56 56 #include <scsi/sas.h> 57 57 #include "sci_base_state_machine.h" 58 - #include "scic_controller.h" 59 - #include "scic_sds_controller.h" 60 58 #include "remote_device.h" 61 59 #include "scic_sds_request.h" 62 60 #include "scic_sds_smp_request.h" 63 - #include "sci_environment.h" 64 61 #include "sci_util.h" 65 62 #include "scu_completion_codes.h" 66 63 #include "scu_task_context.h" 64 + #include "host.h" 67 65 68 66 static void scu_smp_request_construct_task_context( 69 67 struct scic_sds_request *sci_req,
+1 -3
drivers/scsi/isci/core/scic_sds_ssp_request.c
··· 53 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 54 */ 55 55 56 + #include "host.h" 56 57 #include "sci_base_state_machine.h" 57 - #include "scic_controller.h" 58 - #include "scic_sds_controller.h" 59 58 #include "scic_sds_request.h" 60 - #include "sci_environment.h" 61 59 #include "scu_completion_codes.h" 62 60 #include "scu_task_context.h" 63 61
-2
drivers/scsi/isci/core/scic_sds_stp_request.c
··· 58 58 #include "sci_base_state.h" 59 59 #include "sci_base_state_machine.h" 60 60 #include "scic_io_request.h" 61 - #include "scic_sds_controller.h" 62 61 #include "remote_device.h" 63 62 #include "scic_sds_request.h" 64 63 #include "scic_sds_stp_pio_request.h" 65 64 #include "scic_sds_stp_request.h" 66 65 #include "scic_sds_unsolicited_frame_control.h" 67 - #include "sci_environment.h" 68 66 #include "sci_util.h" 69 67 #include "scu_completion_codes.h" 70 68 #include "scu_event_codes.h"
+1 -10
drivers/scsi/isci/core/scic_sds_unsolicited_frame_control.c
··· 53 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 54 */ 55 55 56 - /** 57 - * This file contains the implementation of the 58 - * struct scic_sds_unsolicited_frame_control object and it's public, protected, and 59 - * private methods. 60 - * 61 - * 62 - */ 63 - 56 + #include "host.h" 64 57 #include "scic_sds_unsolicited_frame_control.h" 65 58 #include "scu_registers.h" 66 - #include "scic_sds_controller.h" 67 59 #include "sci_util.h" 68 - #include "sci_environment.h" 69 60 70 61 /** 71 62 * This method will program the unsolicited frames (UFs) into the UF address
+2777 -8
drivers/scsi/isci/host.c
··· 52 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 53 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 54 */ 55 - 55 + #include <linux/device.h> 56 + #include <scsi/sas.h> 57 + #include "host.h" 56 58 #include "isci.h" 57 - #include "scic_io_request.h" 58 - #include "scic_port.h" 59 59 #include "port.h" 60 - #include "request.h" 61 60 #include "host.h" 62 61 #include "probe_roms.h" 63 - #include "scic_controller.h" 64 - #include "scic_sds_controller.h" 62 + #include "remote_device.h" 63 + #include "request.h" 64 + #include "scic_io_request.h" 65 + #include "scic_sds_port_configuration_agent.h" 66 + #include "sci_util.h" 67 + #include "scu_completion_codes.h" 68 + #include "scu_event_codes.h" 69 + #include "scu_registers.h" 70 + #include "scu_remote_node_context.h" 71 + #include "scu_task_context.h" 72 + #include "scu_unsolicited_frame.h" 65 73 #include "timers.h" 74 + 75 + #define SCU_CONTEXT_RAM_INIT_STALL_TIME 200 76 + 77 + /** 78 + * smu_dcc_get_max_ports() - 79 + * 80 + * This macro returns the maximum number of logical ports supported by the 81 + * hardware. The caller passes in the value read from the device context 82 + * capacity register and this macro will mash and shift the value appropriately. 83 + */ 84 + #define smu_dcc_get_max_ports(dcc_value) \ 85 + (\ 86 + (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \ 87 + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \ 88 + ) 89 + 90 + /** 91 + * smu_dcc_get_max_task_context() - 92 + * 93 + * This macro returns the maximum number of task contexts supported by the 94 + * hardware. The caller passes in the value read from the device context 95 + * capacity register and this macro will mash and shift the value appropriately. 
96 + */ 97 + #define smu_dcc_get_max_task_context(dcc_value) \ 98 + (\ 99 + (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \ 100 + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \ 101 + ) 102 + 103 + /** 104 + * smu_dcc_get_max_remote_node_context() - 105 + * 106 + * This macro returns the maximum number of remote node contexts supported by 107 + * the hardware. The caller passes in the value read from the device context 108 + * capacity register and this macro will mash and shift the value appropriately. 109 + */ 110 + #define smu_dcc_get_max_remote_node_context(dcc_value) \ 111 + (\ 112 + (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \ 113 + >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \ 114 + ) 115 + 116 + 117 + #define SCIC_SDS_CONTROLLER_MIN_TIMER_COUNT 3 118 + #define SCIC_SDS_CONTROLLER_MAX_TIMER_COUNT 3 119 + 120 + /** 121 + * 122 + * 123 + * The number of milliseconds to wait for a phy to start. 124 + */ 125 + #define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100 126 + 127 + /** 128 + * 129 + * 130 + * The number of milliseconds to wait while a given phy is consuming power 131 + * before allowing another set of phys to consume power. Ultimately, this will 132 + * be specified by OEM parameter. 133 + */ 134 + #define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500 135 + 136 + /** 137 + * NORMALIZE_PUT_POINTER() - 138 + * 139 + * This macro will normalize the completion queue put pointer so its value can 140 + * be used as an array inde 141 + */ 142 + #define NORMALIZE_PUT_POINTER(x) \ 143 + ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK) 144 + 145 + 146 + /** 147 + * NORMALIZE_EVENT_POINTER() - 148 + * 149 + * This macro will normalize the completion queue event entry so its value can 150 + * be used as an index. 
151 + */ 152 + #define NORMALIZE_EVENT_POINTER(x) \ 153 + (\ 154 + ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \ 155 + >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \ 156 + ) 157 + 158 + /** 159 + * INCREMENT_COMPLETION_QUEUE_GET() - 160 + * 161 + * This macro will increment the controllers completion queue index value and 162 + * possibly toggle the cycle bit if the completion queue index wraps back to 0. 163 + */ 164 + #define INCREMENT_COMPLETION_QUEUE_GET(controller, index, cycle) \ 165 + INCREMENT_QUEUE_GET(\ 166 + (index), \ 167 + (cycle), \ 168 + (controller)->completion_queue_entries, \ 169 + SMU_CQGR_CYCLE_BIT \ 170 + ) 171 + 172 + /** 173 + * INCREMENT_EVENT_QUEUE_GET() - 174 + * 175 + * This macro will increment the controllers event queue index value and 176 + * possibly toggle the event cycle bit if the event queue index wraps back to 0. 177 + */ 178 + #define INCREMENT_EVENT_QUEUE_GET(controller, index, cycle) \ 179 + INCREMENT_QUEUE_GET(\ 180 + (index), \ 181 + (cycle), \ 182 + (controller)->completion_event_entries, \ 183 + SMU_CQGR_EVENT_CYCLE_BIT \ 184 + ) 185 + 186 + 187 + /** 188 + * NORMALIZE_GET_POINTER() - 189 + * 190 + * This macro will normalize the completion queue get pointer so its value can 191 + * be used as an index into an array 192 + */ 193 + #define NORMALIZE_GET_POINTER(x) \ 194 + ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK) 195 + 196 + /** 197 + * NORMALIZE_GET_POINTER_CYCLE_BIT() - 198 + * 199 + * This macro will normalize the completion queue cycle pointer so it matches 200 + * the completion queue cycle bit 201 + */ 202 + #define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \ 203 + ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT)) 204 + 205 + /** 206 + * COMPLETION_QUEUE_CYCLE_BIT() - 207 + * 208 + * This macro will return the cycle bit of the completion queue entry 209 + */ 210 + #define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000) 211 + 212 + static bool 
scic_sds_controller_completion_queue_has_entries( 213 + struct scic_sds_controller *scic) 214 + { 215 + u32 get_value = scic->completion_queue_get; 216 + u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK; 217 + 218 + if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) == 219 + COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index])) 220 + return true; 221 + 222 + return false; 223 + } 224 + 225 + static bool scic_sds_controller_isr(struct scic_sds_controller *scic) 226 + { 227 + if (scic_sds_controller_completion_queue_has_entries(scic)) { 228 + return true; 229 + } else { 230 + /* 231 + * we have a spurious interrupt it could be that we have already 232 + * emptied the completion queue from a previous interrupt */ 233 + writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status); 234 + 235 + /* 236 + * There is a race in the hardware that could cause us not to be notified 237 + * of an interrupt completion if we do not take this step. We will mask 238 + * then unmask the interrupts so if there is another interrupt pending 239 + * the clearing of the interrupt source we get the next interrupt message. 
*/ 240 + writel(0xFF000000, &scic->smu_registers->interrupt_mask); 241 + writel(0, &scic->smu_registers->interrupt_mask); 242 + } 243 + 244 + return false; 245 + } 66 246 67 247 irqreturn_t isci_msix_isr(int vec, void *data) 68 248 { ··· 252 72 tasklet_schedule(&ihost->completion_tasklet); 253 73 254 74 return IRQ_HANDLED; 75 + } 76 + 77 + static bool scic_sds_controller_error_isr(struct scic_sds_controller *scic) 78 + { 79 + u32 interrupt_status; 80 + 81 + interrupt_status = 82 + readl(&scic->smu_registers->interrupt_status); 83 + interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND); 84 + 85 + if (interrupt_status != 0) { 86 + /* 87 + * There is an error interrupt pending so let it through and handle 88 + * in the callback */ 89 + return true; 90 + } 91 + 92 + /* 93 + * There is a race in the hardware that could cause us not to be notified 94 + * of an interrupt completion if we do not take this step. We will mask 95 + * then unmask the error interrupts so if there was another interrupt 96 + * pending we will be notified. 97 + * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? 
*/ 98 + writel(0xff, &scic->smu_registers->interrupt_mask); 99 + writel(0, &scic->smu_registers->interrupt_mask); 100 + 101 + return false; 102 + } 103 + 104 + static void scic_sds_controller_task_completion(struct scic_sds_controller *scic, 105 + u32 completion_entry) 106 + { 107 + u32 index; 108 + struct scic_sds_request *io_request; 109 + 110 + index = SCU_GET_COMPLETION_INDEX(completion_entry); 111 + io_request = scic->io_request_table[index]; 112 + 113 + /* Make sure that we really want to process this IO request */ 114 + if ( 115 + (io_request != NULL) 116 + && (io_request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) 117 + && ( 118 + scic_sds_io_tag_get_sequence(io_request->io_tag) 119 + == scic->io_request_sequence[index] 120 + ) 121 + ) { 122 + /* Yep this is a valid io request pass it along to the io request handler */ 123 + scic_sds_io_request_tc_completion(io_request, completion_entry); 124 + } 125 + } 126 + 127 + static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic, 128 + u32 completion_entry) 129 + { 130 + u32 index; 131 + struct scic_sds_request *io_request; 132 + struct scic_sds_remote_device *device; 133 + 134 + index = SCU_GET_COMPLETION_INDEX(completion_entry); 135 + 136 + switch (scu_get_command_request_type(completion_entry)) { 137 + case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: 138 + case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: 139 + io_request = scic->io_request_table[index]; 140 + dev_warn(scic_to_dev(scic), 141 + "%s: SCIC SDS Completion type SDMA %x for io request " 142 + "%p\n", 143 + __func__, 144 + completion_entry, 145 + io_request); 146 + /* @todo For a post TC operation we need to fail the IO 147 + * request 148 + */ 149 + break; 150 + 151 + case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC: 152 + case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC: 153 + case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: 154 + device = scic->device_table[index]; 155 + dev_warn(scic_to_dev(scic), 156 + "%s: SCIC SDS Completion type 
SDMA %x for remote " 157 + "device %p\n", 158 + __func__, 159 + completion_entry, 160 + device); 161 + /* @todo For a port RNC operation we need to fail the 162 + * device 163 + */ 164 + break; 165 + 166 + default: 167 + dev_warn(scic_to_dev(scic), 168 + "%s: SCIC SDS Completion unknown SDMA completion " 169 + "type %x\n", 170 + __func__, 171 + completion_entry); 172 + break; 173 + 174 + } 175 + } 176 + 177 + static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *scic, 178 + u32 completion_entry) 179 + { 180 + u32 index; 181 + u32 frame_index; 182 + 183 + struct isci_host *ihost = scic_to_ihost(scic); 184 + struct scu_unsolicited_frame_header *frame_header; 185 + struct scic_sds_phy *phy; 186 + struct scic_sds_remote_device *device; 187 + 188 + enum sci_status result = SCI_FAILURE; 189 + 190 + frame_index = SCU_GET_FRAME_INDEX(completion_entry); 191 + 192 + frame_header = scic->uf_control.buffers.array[frame_index].header; 193 + scic->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE; 194 + 195 + if (SCU_GET_FRAME_ERROR(completion_entry)) { 196 + /* 197 + * / @todo If the IAF frame or SIGNATURE FIS frame has an error will 198 + * / this cause a problem? We expect the phy initialization will 199 + * / fail if there is an error in the frame. */ 200 + scic_sds_controller_release_frame(scic, frame_index); 201 + return; 202 + } 203 + 204 + if (frame_header->is_address_frame) { 205 + index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry); 206 + phy = &ihost->phys[index].sci; 207 + result = scic_sds_phy_frame_handler(phy, frame_index); 208 + } else { 209 + 210 + index = SCU_GET_COMPLETION_INDEX(completion_entry); 211 + 212 + if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { 213 + /* 214 + * This is a signature fis or a frame from a direct attached SATA 215 + * device that has not yet been created. In either case forwared 216 + * the frame to the PE and let it take care of the frame data. 
*/ 217 + index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry); 218 + phy = &ihost->phys[index].sci; 219 + result = scic_sds_phy_frame_handler(phy, frame_index); 220 + } else { 221 + if (index < scic->remote_node_entries) 222 + device = scic->device_table[index]; 223 + else 224 + device = NULL; 225 + 226 + if (device != NULL) 227 + result = scic_sds_remote_device_frame_handler(device, frame_index); 228 + else 229 + scic_sds_controller_release_frame(scic, frame_index); 230 + } 231 + } 232 + 233 + if (result != SCI_SUCCESS) { 234 + /* 235 + * / @todo Is there any reason to report some additional error message 236 + * / when we get this failure notifiction? */ 237 + } 238 + } 239 + 240 + static void scic_sds_controller_event_completion(struct scic_sds_controller *scic, 241 + u32 completion_entry) 242 + { 243 + struct isci_host *ihost = scic_to_ihost(scic); 244 + struct scic_sds_request *io_request; 245 + struct scic_sds_remote_device *device; 246 + struct scic_sds_phy *phy; 247 + u32 index; 248 + 249 + index = SCU_GET_COMPLETION_INDEX(completion_entry); 250 + 251 + switch (scu_get_event_type(completion_entry)) { 252 + case SCU_EVENT_TYPE_SMU_COMMAND_ERROR: 253 + /* / @todo The driver did something wrong and we need to fix the condtion. */ 254 + dev_err(scic_to_dev(scic), 255 + "%s: SCIC Controller 0x%p received SMU command error " 256 + "0x%x\n", 257 + __func__, 258 + scic, 259 + completion_entry); 260 + break; 261 + 262 + case SCU_EVENT_TYPE_SMU_PCQ_ERROR: 263 + case SCU_EVENT_TYPE_SMU_ERROR: 264 + case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR: 265 + /* 266 + * / @todo This is a hardware failure and its likely that we want to 267 + * / reset the controller. 
*/ 268 + dev_err(scic_to_dev(scic), 269 + "%s: SCIC Controller 0x%p received fatal controller " 270 + "event 0x%x\n", 271 + __func__, 272 + scic, 273 + completion_entry); 274 + break; 275 + 276 + case SCU_EVENT_TYPE_TRANSPORT_ERROR: 277 + io_request = scic->io_request_table[index]; 278 + scic_sds_io_request_event_handler(io_request, completion_entry); 279 + break; 280 + 281 + case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: 282 + switch (scu_get_event_specifier(completion_entry)) { 283 + case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE: 284 + case SCU_EVENT_SPECIFIC_TASK_TIMEOUT: 285 + io_request = scic->io_request_table[index]; 286 + if (io_request != NULL) 287 + scic_sds_io_request_event_handler(io_request, completion_entry); 288 + else 289 + dev_warn(scic_to_dev(scic), 290 + "%s: SCIC Controller 0x%p received " 291 + "event 0x%x for io request object " 292 + "that doesnt exist.\n", 293 + __func__, 294 + scic, 295 + completion_entry); 296 + 297 + break; 298 + 299 + case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT: 300 + device = scic->device_table[index]; 301 + if (device != NULL) 302 + scic_sds_remote_device_event_handler(device, completion_entry); 303 + else 304 + dev_warn(scic_to_dev(scic), 305 + "%s: SCIC Controller 0x%p received " 306 + "event 0x%x for remote device object " 307 + "that doesnt exist.\n", 308 + __func__, 309 + scic, 310 + completion_entry); 311 + 312 + break; 313 + } 314 + break; 315 + 316 + case SCU_EVENT_TYPE_BROADCAST_CHANGE: 317 + /* 318 + * direct the broadcast change event to the phy first and then let 319 + * the phy redirect the broadcast change to the port object */ 320 + case SCU_EVENT_TYPE_ERR_CNT_EVENT: 321 + /* 322 + * direct error counter event to the phy object since that is where 323 + * we get the event notification. This is a type 4 event. 
*/ 324 + case SCU_EVENT_TYPE_OSSP_EVENT: 325 + index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry); 326 + phy = &ihost->phys[index].sci; 327 + scic_sds_phy_event_handler(phy, completion_entry); 328 + break; 329 + 330 + case SCU_EVENT_TYPE_RNC_SUSPEND_TX: 331 + case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: 332 + case SCU_EVENT_TYPE_RNC_OPS_MISC: 333 + if (index < scic->remote_node_entries) { 334 + device = scic->device_table[index]; 335 + 336 + if (device != NULL) 337 + scic_sds_remote_device_event_handler(device, completion_entry); 338 + } else 339 + dev_err(scic_to_dev(scic), 340 + "%s: SCIC Controller 0x%p received event 0x%x " 341 + "for remote device object 0x%0x that doesnt " 342 + "exist.\n", 343 + __func__, 344 + scic, 345 + completion_entry, 346 + index); 347 + 348 + break; 349 + 350 + default: 351 + dev_warn(scic_to_dev(scic), 352 + "%s: SCIC Controller received unknown event code %x\n", 353 + __func__, 354 + completion_entry); 355 + break; 356 + } 357 + } 358 + 359 + 360 + 361 + static void scic_sds_controller_process_completions(struct scic_sds_controller *scic) 362 + { 363 + u32 completion_count = 0; 364 + u32 completion_entry; 365 + u32 get_index; 366 + u32 get_cycle; 367 + u32 event_index; 368 + u32 event_cycle; 369 + 370 + dev_dbg(scic_to_dev(scic), 371 + "%s: completion queue begining get:0x%08x\n", 372 + __func__, 373 + scic->completion_queue_get); 374 + 375 + /* Get the component parts of the completion queue */ 376 + get_index = NORMALIZE_GET_POINTER(scic->completion_queue_get); 377 + get_cycle = SMU_CQGR_CYCLE_BIT & scic->completion_queue_get; 378 + 379 + event_index = NORMALIZE_EVENT_POINTER(scic->completion_queue_get); 380 + event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & scic->completion_queue_get; 381 + 382 + while ( 383 + NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle) 384 + == COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index]) 385 + ) { 386 + completion_count++; 387 + 388 + completion_entry = scic->completion_queue[get_index]; 389 + 
INCREMENT_COMPLETION_QUEUE_GET(scic, get_index, get_cycle); 390 + 391 + dev_dbg(scic_to_dev(scic), 392 + "%s: completion queue entry:0x%08x\n", 393 + __func__, 394 + completion_entry); 395 + 396 + switch (SCU_GET_COMPLETION_TYPE(completion_entry)) { 397 + case SCU_COMPLETION_TYPE_TASK: 398 + scic_sds_controller_task_completion(scic, completion_entry); 399 + break; 400 + 401 + case SCU_COMPLETION_TYPE_SDMA: 402 + scic_sds_controller_sdma_completion(scic, completion_entry); 403 + break; 404 + 405 + case SCU_COMPLETION_TYPE_UFI: 406 + scic_sds_controller_unsolicited_frame(scic, completion_entry); 407 + break; 408 + 409 + case SCU_COMPLETION_TYPE_EVENT: 410 + INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle); 411 + scic_sds_controller_event_completion(scic, completion_entry); 412 + break; 413 + 414 + case SCU_COMPLETION_TYPE_NOTIFY: 415 + /* 416 + * Presently we do the same thing with a notify event that we do with the 417 + * other event codes. */ 418 + INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle); 419 + scic_sds_controller_event_completion(scic, completion_entry); 420 + break; 421 + 422 + default: 423 + dev_warn(scic_to_dev(scic), 424 + "%s: SCIC Controller received unknown " 425 + "completion type %x\n", 426 + __func__, 427 + completion_entry); 428 + break; 429 + } 430 + } 431 + 432 + /* Update the get register if we completed one or more entries */ 433 + if (completion_count > 0) { 434 + scic->completion_queue_get = 435 + SMU_CQGR_GEN_BIT(ENABLE) | 436 + SMU_CQGR_GEN_BIT(EVENT_ENABLE) | 437 + event_cycle | 438 + SMU_CQGR_GEN_VAL(EVENT_POINTER, event_index) | 439 + get_cycle | 440 + SMU_CQGR_GEN_VAL(POINTER, get_index); 441 + 442 + writel(scic->completion_queue_get, 443 + &scic->smu_registers->completion_queue_get); 444 + 445 + } 446 + 447 + dev_dbg(scic_to_dev(scic), 448 + "%s: completion queue ending get:0x%08x\n", 449 + __func__, 450 + scic->completion_queue_get); 451 + 452 + } 453 + 454 + static void scic_sds_controller_error_handler(struct 
scic_sds_controller *scic) 455 + { 456 + u32 interrupt_status; 457 + 458 + interrupt_status = 459 + readl(&scic->smu_registers->interrupt_status); 460 + 461 + if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) && 462 + scic_sds_controller_completion_queue_has_entries(scic)) { 463 + 464 + scic_sds_controller_process_completions(scic); 465 + writel(SMU_ISR_QUEUE_SUSPEND, &scic->smu_registers->interrupt_status); 466 + } else { 467 + dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__, 468 + interrupt_status); 469 + 470 + sci_base_state_machine_change_state(&scic->state_machine, 471 + SCI_BASE_CONTROLLER_STATE_FAILED); 472 + 473 + return; 474 + } 475 + 476 + /* If we dont process any completions I am not sure that we want to do this. 477 + * We are in the middle of a hardware fault and should probably be reset. 478 + */ 479 + writel(0, &scic->smu_registers->interrupt_mask); 255 480 } 256 481 257 482 irqreturn_t isci_intx_isr(int vec, void *data) ··· 697 112 * core library. 698 113 * 699 114 */ 700 - void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status) 115 + static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status) 701 116 { 702 117 if (completion_status != SCI_SUCCESS) 703 118 dev_info(&ihost->pdev->dev, ··· 727 142 728 143 } 729 144 145 + /** 146 + * scic_controller_get_suggested_start_timeout() - This method returns the 147 + * suggested scic_controller_start() timeout amount. The user is free to 148 + * use any timeout value, but this method provides the suggested minimum 149 + * start timeout value. The returned value is based upon empirical 150 + * information determined as a result of interoperability testing. 151 + * @controller: the handle to the controller object for which to return the 152 + * suggested start timeout. 153 + * 154 + * This method returns the number of milliseconds for the suggested start 155 + * operation timeout. 
156 + */ 157 + static u32 scic_controller_get_suggested_start_timeout( 158 + struct scic_sds_controller *sc) 159 + { 160 + /* Validate the user supplied parameters. */ 161 + if (sc == NULL) 162 + return 0; 163 + 164 + /* 165 + * The suggested minimum timeout value for a controller start operation: 166 + * 167 + * Signature FIS Timeout 168 + * + Phy Start Timeout 169 + * + Number of Phy Spin Up Intervals 170 + * --------------------------------- 171 + * Number of milliseconds for the controller start operation. 172 + * 173 + * NOTE: The number of phy spin up intervals will be equivalent 174 + * to the number of phys divided by the number phys allowed 175 + * per interval - 1 (once OEM parameters are supported). 176 + * Currently we assume only 1 phy per interval. */ 177 + 178 + return SCIC_SDS_SIGNATURE_FIS_TIMEOUT 179 + + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 180 + + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); 181 + } 182 + 183 + static void scic_controller_enable_interrupts( 184 + struct scic_sds_controller *scic) 185 + { 186 + BUG_ON(scic->smu_registers == NULL); 187 + writel(0, &scic->smu_registers->interrupt_mask); 188 + } 189 + 190 + void scic_controller_disable_interrupts( 191 + struct scic_sds_controller *scic) 192 + { 193 + BUG_ON(scic->smu_registers == NULL); 194 + writel(0xffffffff, &scic->smu_registers->interrupt_mask); 195 + } 196 + 197 + static void scic_sds_controller_enable_port_task_scheduler( 198 + struct scic_sds_controller *scic) 199 + { 200 + u32 port_task_scheduler_value; 201 + 202 + port_task_scheduler_value = 203 + readl(&scic->scu_registers->peg0.ptsg.control); 204 + port_task_scheduler_value |= 205 + (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) | 206 + SCU_PTSGCR_GEN_BIT(PTSG_ENABLE)); 207 + writel(port_task_scheduler_value, 208 + &scic->scu_registers->peg0.ptsg.control); 209 + } 210 + 211 + static void scic_sds_controller_assign_task_entries(struct scic_sds_controller *scic) 212 + { 213 + u32 task_assignment; 214 + 215 + /* 216 + 
* Assign all the TCs to function 0 217 + * TODO: Do we actually need to read this register to write it back? 218 + */ 219 + 220 + task_assignment = 221 + readl(&scic->smu_registers->task_context_assignment[0]); 222 + 223 + task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) | 224 + (SMU_TCA_GEN_VAL(ENDING, scic->task_context_entries - 1)) | 225 + (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE)); 226 + 227 + writel(task_assignment, 228 + &scic->smu_registers->task_context_assignment[0]); 229 + 230 + } 231 + 232 + static void scic_sds_controller_initialize_completion_queue(struct scic_sds_controller *scic) 233 + { 234 + u32 index; 235 + u32 completion_queue_control_value; 236 + u32 completion_queue_get_value; 237 + u32 completion_queue_put_value; 238 + 239 + scic->completion_queue_get = 0; 240 + 241 + completion_queue_control_value = ( 242 + SMU_CQC_QUEUE_LIMIT_SET(scic->completion_queue_entries - 1) 243 + | SMU_CQC_EVENT_LIMIT_SET(scic->completion_event_entries - 1) 244 + ); 245 + 246 + writel(completion_queue_control_value, 247 + &scic->smu_registers->completion_queue_control); 248 + 249 + 250 + /* Set the completion queue get pointer and enable the queue */ 251 + completion_queue_get_value = ( 252 + (SMU_CQGR_GEN_VAL(POINTER, 0)) 253 + | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0)) 254 + | (SMU_CQGR_GEN_BIT(ENABLE)) 255 + | (SMU_CQGR_GEN_BIT(EVENT_ENABLE)) 256 + ); 257 + 258 + writel(completion_queue_get_value, 259 + &scic->smu_registers->completion_queue_get); 260 + 261 + /* Set the completion queue put pointer */ 262 + completion_queue_put_value = ( 263 + (SMU_CQPR_GEN_VAL(POINTER, 0)) 264 + | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0)) 265 + ); 266 + 267 + writel(completion_queue_put_value, 268 + &scic->smu_registers->completion_queue_put); 269 + 270 + /* Initialize the cycle bit of the completion queue entries */ 271 + for (index = 0; index < scic->completion_queue_entries; index++) { 272 + /* 273 + * If get.cycle_bit != completion_queue.cycle_bit 274 + * its not a valid completion queue 
entry 275 + * so at system start all entries are invalid */ 276 + scic->completion_queue[index] = 0x80000000; 277 + } 278 + } 279 + 280 + static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_sds_controller *scic) 281 + { 282 + u32 frame_queue_control_value; 283 + u32 frame_queue_get_value; 284 + u32 frame_queue_put_value; 285 + 286 + /* Write the queue size */ 287 + frame_queue_control_value = 288 + SCU_UFQC_GEN_VAL(QUEUE_SIZE, 289 + scic->uf_control.address_table.count); 290 + 291 + writel(frame_queue_control_value, 292 + &scic->scu_registers->sdma.unsolicited_frame_queue_control); 293 + 294 + /* Setup the get pointer for the unsolicited frame queue */ 295 + frame_queue_get_value = ( 296 + SCU_UFQGP_GEN_VAL(POINTER, 0) 297 + | SCU_UFQGP_GEN_BIT(ENABLE_BIT) 298 + ); 299 + 300 + writel(frame_queue_get_value, 301 + &scic->scu_registers->sdma.unsolicited_frame_get_pointer); 302 + /* Setup the put pointer for the unsolicited frame queue */ 303 + frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0); 304 + writel(frame_queue_put_value, 305 + &scic->scu_registers->sdma.unsolicited_frame_put_pointer); 306 + } 307 + 308 + /** 309 + * This method will attempt to transition into the ready state for the 310 + * controller and indicate that the controller start operation has completed 311 + * if all criteria are met. 312 + * @scic: This parameter indicates the controller object for which 313 + * to transition to ready. 314 + * @status: This parameter indicates the status value to be pass into the call 315 + * to scic_cb_controller_start_complete(). 316 + * 317 + * none. 
318 + */ 319 + static void scic_sds_controller_transition_to_ready( 320 + struct scic_sds_controller *scic, 321 + enum sci_status status) 322 + { 323 + struct isci_host *ihost = scic_to_ihost(scic); 324 + 325 + if (scic->state_machine.current_state_id == 326 + SCI_BASE_CONTROLLER_STATE_STARTING) { 327 + /* 328 + * We move into the ready state, because some of the phys/ports 329 + * may be up and operational. 330 + */ 331 + sci_base_state_machine_change_state(&scic->state_machine, 332 + SCI_BASE_CONTROLLER_STATE_READY); 333 + 334 + isci_host_start_complete(ihost, status); 335 + } 336 + } 337 + 338 + static void scic_sds_controller_phy_timer_stop(struct scic_sds_controller *scic) 339 + { 340 + isci_timer_stop(scic->phy_startup_timer); 341 + 342 + scic->phy_startup_timer_pending = false; 343 + } 344 + 345 + static void scic_sds_controller_phy_timer_start(struct scic_sds_controller *scic) 346 + { 347 + isci_timer_start(scic->phy_startup_timer, 348 + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT); 349 + 350 + scic->phy_startup_timer_pending = true; 351 + } 352 + 353 + /** 354 + * scic_sds_controller_start_next_phy - start phy 355 + * @scic: controller 356 + * 357 + * If all the phys have been started, then attempt to transition the 358 + * controller to the READY state and inform the user 359 + * (scic_cb_controller_start_complete()). 
360 + */ 361 + static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *scic) 362 + { 363 + struct isci_host *ihost = scic_to_ihost(scic); 364 + struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1; 365 + struct scic_sds_phy *sci_phy; 366 + enum sci_status status; 367 + 368 + status = SCI_SUCCESS; 369 + 370 + if (scic->phy_startup_timer_pending) 371 + return status; 372 + 373 + if (scic->next_phy_to_start >= SCI_MAX_PHYS) { 374 + bool is_controller_start_complete = true; 375 + u32 state; 376 + u8 index; 377 + 378 + for (index = 0; index < SCI_MAX_PHYS; index++) { 379 + sci_phy = &ihost->phys[index].sci; 380 + state = sci_phy->state_machine.current_state_id; 381 + 382 + if (!scic_sds_phy_get_port(sci_phy)) 383 + continue; 384 + 385 + /* The controller start operation is complete iff: 386 + * - all links have been given an opportunity to start 387 + * - have no indication of a connected device 388 + * - have an indication of a connected device and it has 389 + * finished the link training process. 390 + */ 391 + if ((sci_phy->is_in_link_training == false && 392 + state == SCI_BASE_PHY_STATE_INITIAL) || 393 + (sci_phy->is_in_link_training == false && 394 + state == SCI_BASE_PHY_STATE_STOPPED) || 395 + (sci_phy->is_in_link_training == true && 396 + state == SCI_BASE_PHY_STATE_STARTING)) { 397 + is_controller_start_complete = false; 398 + break; 399 + } 400 + } 401 + 402 + /* 403 + * The controller has successfully finished the start process. 404 + * Inform the SCI Core user and transition to the READY state. 
*/ 405 + if (is_controller_start_complete == true) { 406 + scic_sds_controller_transition_to_ready(scic, SCI_SUCCESS); 407 + scic_sds_controller_phy_timer_stop(scic); 408 + } 409 + } else { 410 + sci_phy = &ihost->phys[scic->next_phy_to_start].sci; 411 + 412 + if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { 413 + if (scic_sds_phy_get_port(sci_phy) == NULL) { 414 + scic->next_phy_to_start++; 415 + 416 + /* Caution recursion ahead be forwarned 417 + * 418 + * The PHY was never added to a PORT in MPC mode 419 + * so start the next phy in sequence This phy 420 + * will never go link up and will not draw power 421 + * the OEM parameters either configured the phy 422 + * incorrectly for the PORT or it was never 423 + * assigned to a PORT 424 + */ 425 + return scic_sds_controller_start_next_phy(scic); 426 + } 427 + } 428 + 429 + status = scic_sds_phy_start(sci_phy); 430 + 431 + if (status == SCI_SUCCESS) { 432 + scic_sds_controller_phy_timer_start(scic); 433 + } else { 434 + dev_warn(scic_to_dev(scic), 435 + "%s: Controller stop operation failed " 436 + "to stop phy %d because of status " 437 + "%d.\n", 438 + __func__, 439 + ihost->phys[scic->next_phy_to_start].sci.phy_index, 440 + status); 441 + } 442 + 443 + scic->next_phy_to_start++; 444 + } 445 + 446 + return status; 447 + } 448 + 449 + static void scic_sds_controller_phy_startup_timeout_handler(void *_scic) 450 + { 451 + struct scic_sds_controller *scic = _scic; 452 + enum sci_status status; 453 + 454 + scic->phy_startup_timer_pending = false; 455 + status = SCI_FAILURE; 456 + while (status != SCI_SUCCESS) 457 + status = scic_sds_controller_start_next_phy(scic); 458 + } 459 + 460 + static enum sci_status scic_controller_start(struct scic_sds_controller *scic, 461 + u32 timeout) 462 + { 463 + struct isci_host *ihost = scic_to_ihost(scic); 464 + enum sci_status result; 465 + u16 index; 466 + 467 + if (scic->state_machine.current_state_id != 468 + SCI_BASE_CONTROLLER_STATE_INITIALIZED) { 469 + 
dev_warn(scic_to_dev(scic), 470 + "SCIC Controller start operation requested in " 471 + "invalid state\n"); 472 + return SCI_FAILURE_INVALID_STATE; 473 + } 474 + 475 + /* Build the TCi free pool */ 476 + sci_pool_initialize(scic->tci_pool); 477 + for (index = 0; index < scic->task_context_entries; index++) 478 + sci_pool_put(scic->tci_pool, index); 479 + 480 + /* Build the RNi free pool */ 481 + scic_sds_remote_node_table_initialize( 482 + &scic->available_remote_nodes, 483 + scic->remote_node_entries); 484 + 485 + /* 486 + * Before anything else lets make sure we will not be 487 + * interrupted by the hardware. 488 + */ 489 + scic_controller_disable_interrupts(scic); 490 + 491 + /* Enable the port task scheduler */ 492 + scic_sds_controller_enable_port_task_scheduler(scic); 493 + 494 + /* Assign all the task entries to scic physical function */ 495 + scic_sds_controller_assign_task_entries(scic); 496 + 497 + /* Now initialize the completion queue */ 498 + scic_sds_controller_initialize_completion_queue(scic); 499 + 500 + /* Initialize the unsolicited frame queue for use */ 501 + scic_sds_controller_initialize_unsolicited_frame_queue(scic); 502 + 503 + /* Start all of the ports on this controller */ 504 + for (index = 0; index < scic->logical_port_entries; index++) { 505 + struct scic_sds_port *sci_port = &ihost->ports[index].sci; 506 + 507 + result = sci_port->state_handlers->start_handler(sci_port); 508 + if (result) 509 + return result; 510 + } 511 + 512 + scic_sds_controller_start_next_phy(scic); 513 + 514 + isci_timer_start(scic->timeout_timer, timeout); 515 + 516 + sci_base_state_machine_change_state(&scic->state_machine, 517 + SCI_BASE_CONTROLLER_STATE_STARTING); 518 + 519 + return SCI_SUCCESS; 520 + } 521 + 730 522 void isci_host_scan_start(struct Scsi_Host *shost) 731 523 { 732 524 struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; ··· 1117 155 spin_unlock_irq(&ihost->scic_lock); 1118 156 } 1119 157 1120 - void isci_host_stop_complete(struct 
isci_host *ihost, enum sci_status completion_status) 158 + static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status) 1121 159 { 1122 160 isci_host_change_state(ihost, isci_stopped); 1123 161 scic_controller_disable_interrupts(&ihost->sci); 1124 162 clear_bit(IHOST_STOP_PENDING, &ihost->flags); 1125 163 wake_up(&ihost->eventq); 164 + } 165 + 166 + static void scic_sds_controller_completion_handler(struct scic_sds_controller *scic) 167 + { 168 + /* Empty out the completion queue */ 169 + if (scic_sds_controller_completion_queue_has_entries(scic)) 170 + scic_sds_controller_process_completions(scic); 171 + 172 + /* Clear the interrupt and enable all interrupts again */ 173 + writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status); 174 + /* Could we write the value of SMU_ISR_COMPLETION? */ 175 + writel(0xFF000000, &scic->smu_registers->interrupt_mask); 176 + writel(0, &scic->smu_registers->interrupt_mask); 1126 177 } 1127 178 1128 179 /** ··· 1248 273 1249 274 } 1250 275 276 + /** 277 + * scic_controller_stop() - This method will stop an individual controller 278 + * object.This method will invoke the associated user callback upon 279 + * completion. The completion callback is called when the following 280 + * conditions are met: -# the method return status is SCI_SUCCESS. -# the 281 + * controller has been quiesced. This method will ensure that all IO 282 + * requests are quiesced, phys are stopped, and all additional operation by 283 + * the hardware is halted. 284 + * @controller: the handle to the controller object to stop. 285 + * @timeout: This parameter specifies the number of milliseconds in which the 286 + * stop operation should complete. 287 + * 288 + * The controller must be in the STARTED or STOPPED state. Indicate if the 289 + * controller stop method succeeded or failed in some way. SCI_SUCCESS if the 290 + * stop operation successfully began. 
SCI_WARNING_ALREADY_IN_STATE if the 291 + * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the 292 + * controller is not either in the STARTED or STOPPED states. 293 + */ 294 + static enum sci_status scic_controller_stop(struct scic_sds_controller *scic, 295 + u32 timeout) 296 + { 297 + if (scic->state_machine.current_state_id != 298 + SCI_BASE_CONTROLLER_STATE_READY) { 299 + dev_warn(scic_to_dev(scic), 300 + "SCIC Controller stop operation requested in " 301 + "invalid state\n"); 302 + return SCI_FAILURE_INVALID_STATE; 303 + } 304 + 305 + isci_timer_start(scic->timeout_timer, timeout); 306 + sci_base_state_machine_change_state(&scic->state_machine, 307 + SCI_BASE_CONTROLLER_STATE_STOPPING); 308 + return SCI_SUCCESS; 309 + } 310 + 311 + /** 312 + * scic_controller_reset() - This method will reset the supplied core 313 + * controller regardless of the state of said controller. This operation is 314 + * considered destructive. In other words, all current operations are wiped 315 + * out. No IO completions for outstanding devices occur. Outstanding IO 316 + * requests are not aborted or completed at the actual remote device. 317 + * @controller: the handle to the controller object to reset. 318 + * 319 + * Indicate if the controller reset method succeeded or failed in some way. 320 + * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if 321 + * the controller reset operation is unable to complete. 322 + */ 323 + static enum sci_status scic_controller_reset(struct scic_sds_controller *scic) 324 + { 325 + switch (scic->state_machine.current_state_id) { 326 + case SCI_BASE_CONTROLLER_STATE_RESET: 327 + case SCI_BASE_CONTROLLER_STATE_READY: 328 + case SCI_BASE_CONTROLLER_STATE_STOPPED: 329 + case SCI_BASE_CONTROLLER_STATE_FAILED: 330 + /* 331 + * The reset operation is not a graceful cleanup, just 332 + * perform the state transition. 
333 + */ 334 + sci_base_state_machine_change_state(&scic->state_machine, 335 + SCI_BASE_CONTROLLER_STATE_RESETTING); 336 + return SCI_SUCCESS; 337 + default: 338 + dev_warn(scic_to_dev(scic), 339 + "SCIC Controller reset operation requested in " 340 + "invalid state\n"); 341 + return SCI_FAILURE_INVALID_STATE; 342 + } 343 + } 344 + 1251 345 void isci_host_deinit(struct isci_host *ihost) 1252 346 { 1253 347 int i; ··· 1383 339 u->ssp_max_occupancy_timeout = ssp_max_occ_to; 1384 340 u->no_outbound_task_timeout = no_outbound_task_to; 1385 341 u->max_number_concurrent_device_spin_up = max_concurr_spinup; 342 + } 343 + 344 + static void scic_sds_controller_initial_state_enter(void *object) 345 + { 346 + struct scic_sds_controller *scic = object; 347 + 348 + sci_base_state_machine_change_state(&scic->state_machine, 349 + SCI_BASE_CONTROLLER_STATE_RESET); 350 + } 351 + 352 + static inline void scic_sds_controller_starting_state_exit(void *object) 353 + { 354 + struct scic_sds_controller *scic = object; 355 + 356 + isci_timer_stop(scic->timeout_timer); 357 + } 358 + 359 + #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853 360 + #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280 361 + #define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000 362 + #define INTERRUPT_COALESCE_NUMBER_MAX 256 363 + #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7 364 + #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28 365 + 366 + /** 367 + * scic_controller_set_interrupt_coalescence() - This method allows the user to 368 + * configure the interrupt coalescence. 369 + * @controller: This parameter represents the handle to the controller object 370 + * for which its interrupt coalesce register is overridden. 371 + * @coalesce_number: Used to control the number of entries in the Completion 372 + * Queue before an interrupt is generated. If the number of entries exceed 373 + * this number, an interrupt will be generated. The valid range of the input 374 + * is [0, 256]. 
A setting of 0 results in coalescing being disabled. 375 + * @coalesce_timeout: Timeout value in microseconds. The valid range of the 376 + * input is [0, 2700000] . A setting of 0 is allowed and results in no 377 + * interrupt coalescing timeout. 378 + * 379 + * Indicate if the user successfully set the interrupt coalesce parameters. 380 + * SCI_SUCCESS The user successfully updated the interrutp coalescence. 381 + * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range. 382 + */ 383 + static enum sci_status scic_controller_set_interrupt_coalescence( 384 + struct scic_sds_controller *scic_controller, 385 + u32 coalesce_number, 386 + u32 coalesce_timeout) 387 + { 388 + u8 timeout_encode = 0; 389 + u32 min = 0; 390 + u32 max = 0; 391 + 392 + /* Check if the input parameters fall in the range. */ 393 + if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX) 394 + return SCI_FAILURE_INVALID_PARAMETER_VALUE; 395 + 396 + /* 397 + * Defined encoding for interrupt coalescing timeout: 398 + * Value Min Max Units 399 + * ----- --- --- ----- 400 + * 0 - - Disabled 401 + * 1 13.3 20.0 ns 402 + * 2 26.7 40.0 403 + * 3 53.3 80.0 404 + * 4 106.7 160.0 405 + * 5 213.3 320.0 406 + * 6 426.7 640.0 407 + * 7 853.3 1280.0 408 + * 8 1.7 2.6 us 409 + * 9 3.4 5.1 410 + * 10 6.8 10.2 411 + * 11 13.7 20.5 412 + * 12 27.3 41.0 413 + * 13 54.6 81.9 414 + * 14 109.2 163.8 415 + * 15 218.5 327.7 416 + * 16 436.9 655.4 417 + * 17 873.8 1310.7 418 + * 18 1.7 2.6 ms 419 + * 19 3.5 5.2 420 + * 20 7.0 10.5 421 + * 21 14.0 21.0 422 + * 22 28.0 41.9 423 + * 23 55.9 83.9 424 + * 24 111.8 167.8 425 + * 25 223.7 335.5 426 + * 26 447.4 671.1 427 + * 27 894.8 1342.2 428 + * 28 1.8 2.7 s 429 + * Others Undefined */ 430 + 431 + /* 432 + * Use the table above to decide the encode of interrupt coalescing timeout 433 + * value for register writing. */ 434 + if (coalesce_timeout == 0) 435 + timeout_encode = 0; 436 + else{ 437 + /* make the timeout value in unit of (10 ns). 
*/ 438 + coalesce_timeout = coalesce_timeout * 100; 439 + min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10; 440 + max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10; 441 + 442 + /* get the encode of timeout for register writing. */ 443 + for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN; 444 + timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX; 445 + timeout_encode++) { 446 + if (min <= coalesce_timeout && max > coalesce_timeout) 447 + break; 448 + else if (coalesce_timeout >= max && coalesce_timeout < min * 2 449 + && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) { 450 + if ((coalesce_timeout - max) < (2 * min - coalesce_timeout)) 451 + break; 452 + else{ 453 + timeout_encode++; 454 + break; 455 + } 456 + } else { 457 + max = max * 2; 458 + min = min * 2; 459 + } 460 + } 461 + 462 + if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1) 463 + /* the value is out of range. */ 464 + return SCI_FAILURE_INVALID_PARAMETER_VALUE; 465 + } 466 + 467 + writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) | 468 + SMU_ICC_GEN_VAL(TIMER, timeout_encode), 469 + &scic_controller->smu_registers->interrupt_coalesce_control); 470 + 471 + 472 + scic_controller->interrupt_coalesce_number = (u16)coalesce_number; 473 + scic_controller->interrupt_coalesce_timeout = coalesce_timeout / 100; 474 + 475 + return SCI_SUCCESS; 476 + } 477 + 478 + 479 + static void scic_sds_controller_ready_state_enter(void *object) 480 + { 481 + struct scic_sds_controller *scic = object; 482 + 483 + /* set the default interrupt coalescence number and timeout value. */ 484 + scic_controller_set_interrupt_coalescence(scic, 0x10, 250); 485 + } 486 + 487 + static void scic_sds_controller_ready_state_exit(void *object) 488 + { 489 + struct scic_sds_controller *scic = object; 490 + 491 + /* disable interrupt coalescence. 
*/ 492 + scic_controller_set_interrupt_coalescence(scic, 0, 0); 493 + } 494 + 495 + static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller *scic) 496 + { 497 + u32 index; 498 + enum sci_status status; 499 + enum sci_status phy_status; 500 + struct isci_host *ihost = scic_to_ihost(scic); 501 + 502 + status = SCI_SUCCESS; 503 + 504 + for (index = 0; index < SCI_MAX_PHYS; index++) { 505 + phy_status = scic_sds_phy_stop(&ihost->phys[index].sci); 506 + 507 + if (phy_status != SCI_SUCCESS && 508 + phy_status != SCI_FAILURE_INVALID_STATE) { 509 + status = SCI_FAILURE; 510 + 511 + dev_warn(scic_to_dev(scic), 512 + "%s: Controller stop operation failed to stop " 513 + "phy %d because of status %d.\n", 514 + __func__, 515 + ihost->phys[index].sci.phy_index, phy_status); 516 + } 517 + } 518 + 519 + return status; 520 + } 521 + 522 + static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic) 523 + { 524 + u32 index; 525 + enum sci_status port_status; 526 + enum sci_status status = SCI_SUCCESS; 527 + struct isci_host *ihost = scic_to_ihost(scic); 528 + 529 + for (index = 0; index < scic->logical_port_entries; index++) { 530 + struct scic_sds_port *sci_port = &ihost->ports[index].sci; 531 + scic_sds_port_handler_t stop; 532 + 533 + stop = sci_port->state_handlers->stop_handler; 534 + port_status = stop(sci_port); 535 + 536 + if ((port_status != SCI_SUCCESS) && 537 + (port_status != SCI_FAILURE_INVALID_STATE)) { 538 + status = SCI_FAILURE; 539 + 540 + dev_warn(scic_to_dev(scic), 541 + "%s: Controller stop operation failed to " 542 + "stop port %d because of status %d.\n", 543 + __func__, 544 + sci_port->logical_port_index, 545 + port_status); 546 + } 547 + } 548 + 549 + return status; 550 + } 551 + 552 + static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controller *scic) 553 + { 554 + u32 index; 555 + enum sci_status status; 556 + enum sci_status device_status; 557 + 558 + status = SCI_SUCCESS; 559 + 560 
+ for (index = 0; index < scic->remote_node_entries; index++) { 561 + if (scic->device_table[index] != NULL) { 562 + /* / @todo What timeout value do we want to provide to this request? */ 563 + device_status = scic_remote_device_stop(scic->device_table[index], 0); 564 + 565 + if ((device_status != SCI_SUCCESS) && 566 + (device_status != SCI_FAILURE_INVALID_STATE)) { 567 + dev_warn(scic_to_dev(scic), 568 + "%s: Controller stop operation failed " 569 + "to stop device 0x%p because of " 570 + "status %d.\n", 571 + __func__, 572 + scic->device_table[index], device_status); 573 + } 574 + } 575 + } 576 + 577 + return status; 578 + } 579 + 580 + static void scic_sds_controller_stopping_state_enter(void *object) 581 + { 582 + struct scic_sds_controller *scic = object; 583 + 584 + /* Stop all of the components for this controller */ 585 + scic_sds_controller_stop_phys(scic); 586 + scic_sds_controller_stop_ports(scic); 587 + scic_sds_controller_stop_devices(scic); 588 + } 589 + 590 + static void scic_sds_controller_stopping_state_exit(void *object) 591 + { 592 + struct scic_sds_controller *scic = object; 593 + 594 + isci_timer_stop(scic->timeout_timer); 595 + } 596 + 597 + 598 + /** 599 + * scic_sds_controller_reset_hardware() - 600 + * 601 + * This method will reset the controller hardware. 602 + */ 603 + static void scic_sds_controller_reset_hardware(struct scic_sds_controller *scic) 604 + { 605 + /* Disable interrupts so we dont take any spurious interrupts */ 606 + scic_controller_disable_interrupts(scic); 607 + 608 + /* Reset the SCU */ 609 + writel(0xFFFFFFFF, &scic->smu_registers->soft_reset_control); 610 + 611 + /* Delay for 1ms to before clearing the CQP and UFQPR. 
*/ 612 + udelay(1000); 613 + 614 + /* The write to the CQGR clears the CQP */ 615 + writel(0x00000000, &scic->smu_registers->completion_queue_get); 616 + 617 + /* The write to the UFQGP clears the UFQPR */ 618 + writel(0, &scic->scu_registers->sdma.unsolicited_frame_get_pointer); 619 + } 620 + 621 + static void scic_sds_controller_resetting_state_enter(void *object) 622 + { 623 + struct scic_sds_controller *scic = object; 624 + 625 + scic_sds_controller_reset_hardware(scic); 626 + sci_base_state_machine_change_state(&scic->state_machine, 627 + SCI_BASE_CONTROLLER_STATE_RESET); 628 + } 629 + 630 + static const struct sci_base_state scic_sds_controller_state_table[] = { 631 + [SCI_BASE_CONTROLLER_STATE_INITIAL] = { 632 + .enter_state = scic_sds_controller_initial_state_enter, 633 + }, 634 + [SCI_BASE_CONTROLLER_STATE_RESET] = {}, 635 + [SCI_BASE_CONTROLLER_STATE_INITIALIZING] = {}, 636 + [SCI_BASE_CONTROLLER_STATE_INITIALIZED] = {}, 637 + [SCI_BASE_CONTROLLER_STATE_STARTING] = { 638 + .exit_state = scic_sds_controller_starting_state_exit, 639 + }, 640 + [SCI_BASE_CONTROLLER_STATE_READY] = { 641 + .enter_state = scic_sds_controller_ready_state_enter, 642 + .exit_state = scic_sds_controller_ready_state_exit, 643 + }, 644 + [SCI_BASE_CONTROLLER_STATE_RESETTING] = { 645 + .enter_state = scic_sds_controller_resetting_state_enter, 646 + }, 647 + [SCI_BASE_CONTROLLER_STATE_STOPPING] = { 648 + .enter_state = scic_sds_controller_stopping_state_enter, 649 + .exit_state = scic_sds_controller_stopping_state_exit, 650 + }, 651 + [SCI_BASE_CONTROLLER_STATE_STOPPED] = {}, 652 + [SCI_BASE_CONTROLLER_STATE_FAILED] = {} 653 + }; 654 + 655 + static void scic_sds_controller_set_default_config_parameters(struct scic_sds_controller *scic) 656 + { 657 + /* these defaults are overridden by the platform / firmware */ 658 + struct isci_host *ihost = scic_to_ihost(scic); 659 + u16 index; 660 + 661 + /* Default to APC mode. 
*/ 662 + scic->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; 663 + 664 + /* Default to APC mode. */ 665 + scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1; 666 + 667 + /* Default to no SSC operation. */ 668 + scic->oem_parameters.sds1.controller.do_enable_ssc = false; 669 + 670 + /* Initialize all of the port parameter information to narrow ports. */ 671 + for (index = 0; index < SCI_MAX_PORTS; index++) { 672 + scic->oem_parameters.sds1.ports[index].phy_mask = 0; 673 + } 674 + 675 + /* Initialize all of the phy parameter information. */ 676 + for (index = 0; index < SCI_MAX_PHYS; index++) { 677 + /* Default to 6G (i.e. Gen 3) for now. */ 678 + scic->user_parameters.sds1.phys[index].max_speed_generation = 3; 679 + 680 + /* the frequencies cannot be 0 */ 681 + scic->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f; 682 + scic->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff; 683 + scic->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; 684 + 685 + /* 686 + * Previous Vitesse based expanders had a arbitration issue that 687 + * is worked around by having the upper 32-bits of SAS address 688 + * with a value greater then the Vitesse company identifier. 689 + * Hence, usage of 0x5FCFFFFF. 
*/ 690 + scic->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id; 691 + scic->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF; 692 + } 693 + 694 + scic->user_parameters.sds1.stp_inactivity_timeout = 5; 695 + scic->user_parameters.sds1.ssp_inactivity_timeout = 5; 696 + scic->user_parameters.sds1.stp_max_occupancy_timeout = 5; 697 + scic->user_parameters.sds1.ssp_max_occupancy_timeout = 20; 698 + scic->user_parameters.sds1.no_outbound_task_timeout = 20; 699 + } 700 + 701 + 702 + 703 + /** 704 + * scic_controller_construct() - This method will attempt to construct a 705 + * controller object utilizing the supplied parameter information. 706 + * @c: This parameter specifies the controller to be constructed. 707 + * @scu_base: mapped base address of the scu registers 708 + * @smu_base: mapped base address of the smu registers 709 + * 710 + * Indicate if the controller was successfully constructed or if it failed in 711 + * some way. SCI_SUCCESS This value is returned if the controller was 712 + * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned 713 + * if the interrupt coalescence timer may cause SAS compliance issues for SMP 714 + * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE 715 + * This value is returned if the controller does not support the supplied type. 716 + * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the 717 + * controller does not support the supplied initialization data version. 
718 + */ 719 + static enum sci_status scic_controller_construct(struct scic_sds_controller *scic, 720 + void __iomem *scu_base, 721 + void __iomem *smu_base) 722 + { 723 + struct isci_host *ihost = scic_to_ihost(scic); 724 + u8 i; 725 + 726 + sci_base_state_machine_construct(&scic->state_machine, 727 + scic, scic_sds_controller_state_table, 728 + SCI_BASE_CONTROLLER_STATE_INITIAL); 729 + 730 + sci_base_state_machine_start(&scic->state_machine); 731 + 732 + scic->scu_registers = scu_base; 733 + scic->smu_registers = smu_base; 734 + 735 + scic_sds_port_configuration_agent_construct(&scic->port_agent); 736 + 737 + /* Construct the ports for this controller */ 738 + for (i = 0; i < SCI_MAX_PORTS; i++) 739 + scic_sds_port_construct(&ihost->ports[i].sci, i, scic); 740 + scic_sds_port_construct(&ihost->ports[i].sci, SCIC_SDS_DUMMY_PORT, scic); 741 + 742 + /* Construct the phys for this controller */ 743 + for (i = 0; i < SCI_MAX_PHYS; i++) { 744 + /* Add all the PHYs to the dummy port */ 745 + scic_sds_phy_construct(&ihost->phys[i].sci, 746 + &ihost->ports[SCI_MAX_PORTS].sci, i); 747 + } 748 + 749 + scic->invalid_phy_mask = 0; 750 + 751 + /* Set the default maximum values */ 752 + scic->completion_event_entries = SCU_EVENT_COUNT; 753 + scic->completion_queue_entries = SCU_COMPLETION_QUEUE_COUNT; 754 + scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES; 755 + scic->logical_port_entries = SCI_MAX_PORTS; 756 + scic->task_context_entries = SCU_IO_REQUEST_COUNT; 757 + scic->uf_control.buffers.count = SCU_UNSOLICITED_FRAME_COUNT; 758 + scic->uf_control.address_table.count = SCU_UNSOLICITED_FRAME_COUNT; 759 + 760 + /* Initialize the User and OEM parameters to default values. 
*/ 761 + scic_sds_controller_set_default_config_parameters(scic); 762 + 763 + return scic_controller_reset(scic); 764 + } 765 + 766 + int scic_oem_parameters_validate(struct scic_sds_oem_params *oem) 767 + { 768 + int i; 769 + 770 + for (i = 0; i < SCI_MAX_PORTS; i++) 771 + if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX) 772 + return -EINVAL; 773 + 774 + for (i = 0; i < SCI_MAX_PHYS; i++) 775 + if (oem->phys[i].sas_address.high == 0 && 776 + oem->phys[i].sas_address.low == 0) 777 + return -EINVAL; 778 + 779 + if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) { 780 + for (i = 0; i < SCI_MAX_PHYS; i++) 781 + if (oem->ports[i].phy_mask != 0) 782 + return -EINVAL; 783 + } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { 784 + u8 phy_mask = 0; 785 + 786 + for (i = 0; i < SCI_MAX_PHYS; i++) 787 + phy_mask |= oem->ports[i].phy_mask; 788 + 789 + if (phy_mask == 0) 790 + return -EINVAL; 791 + } else 792 + return -EINVAL; 793 + 794 + if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT) 795 + return -EINVAL; 796 + 797 + return 0; 798 + } 799 + 800 + static enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic, 801 + union scic_oem_parameters *scic_parms) 802 + { 803 + u32 state = scic->state_machine.current_state_id; 804 + 805 + if (state == SCI_BASE_CONTROLLER_STATE_RESET || 806 + state == SCI_BASE_CONTROLLER_STATE_INITIALIZING || 807 + state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) { 808 + 809 + if (scic_oem_parameters_validate(&scic_parms->sds1)) 810 + return SCI_FAILURE_INVALID_PARAMETER_VALUE; 811 + scic->oem_parameters.sds1 = scic_parms->sds1; 812 + 813 + return SCI_SUCCESS; 814 + } 815 + 816 + return SCI_FAILURE_INVALID_STATE; 817 + } 818 + 819 + void scic_oem_parameters_get( 820 + struct scic_sds_controller *scic, 821 + union scic_oem_parameters *scic_parms) 822 + { 823 + memcpy(scic_parms, (&scic->oem_parameters), sizeof(*scic_parms)); 824 + } 825 + 826 + 
/*
 * Timer callback for the controller start/stop timeout.  Transitions the
 * state machine to a failure/ready-with-timeout state depending on which
 * operation was being timed.
 */
static void scic_sds_controller_timeout_handler(void *_scic)
{
	struct scic_sds_controller *scic = _scic;
	struct isci_host *ihost = scic_to_ihost(scic);
	struct sci_base_state_machine *sm = &scic->state_machine;

	if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STARTING)
		scic_sds_controller_transition_to_ready(scic, SCI_FAILURE_TIMEOUT);
	else if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STOPPING) {
		sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_FAILED);
		isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
	} else	/* / @todo Now what do we want to do in this case? */
		dev_err(scic_to_dev(scic),
			"%s: Controller timer fired when controller was not "
			"in a state being timed.\n",
			__func__);
}

/*
 * Allocate the phy-startup timer and reset the staggered-spinup
 * bookkeeping.  Returns SCI_FAILURE_INSUFFICIENT_RESOURCES if the timer
 * cannot be created.
 */
static enum sci_status scic_sds_controller_initialize_phy_startup(struct scic_sds_controller *scic)
{
	struct isci_host *ihost = scic_to_ihost(scic);

	scic->phy_startup_timer = isci_timer_create(ihost,
						    scic,
						    scic_sds_controller_phy_startup_timeout_handler);

	if (scic->phy_startup_timer == NULL)
		return SCI_FAILURE_INSUFFICIENT_RESOURCES;
	else {
		scic->next_phy_to_start = 0;
		scic->phy_startup_timer_pending = false;
	}

	return SCI_SUCCESS;
}

/* Arm the power-control (staggered spin-up) interval timer. */
static void scic_sds_controller_power_control_timer_start(struct scic_sds_controller *scic)
{
	isci_timer_start(scic->power_control.timer,
			 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);

	scic->power_control.timer_started = true;
}

/* Cancel the power-control timer if it is currently armed. */
static void scic_sds_controller_power_control_timer_stop(struct scic_sds_controller *scic)
{
	if (scic->power_control.timer_started) {
		isci_timer_stop(scic->power_control.timer);
		scic->power_control.timer_started = false;
	}
}

/* Re-arm the power-control timer so a full interval elapses from now. */
static void scic_sds_controller_power_control_timer_restart(struct scic_sds_controller *scic)
{
	scic_sds_controller_power_control_timer_stop(scic);
	scic_sds_controller_power_control_timer_start(scic);
}

/*
 * Power-control timer callback: resets the per-interval grant count and
 * grants power to as many waiting phys as the OEM spin-up limit allows.
 */
static void scic_sds_controller_power_control_timer_handler(
	void *controller)
{
	struct scic_sds_controller *scic;

	scic = (struct scic_sds_controller *)controller;

	scic->power_control.phys_granted_power = 0;

	if (scic->power_control.phys_waiting == 0) {
		scic->power_control.timer_started = false;
	} else {
		struct scic_sds_phy *sci_phy = NULL;
		u8 i;

		for (i = 0;
		     (i < SCI_MAX_PHYS)
		     && (scic->power_control.phys_waiting != 0);
		     i++) {
			if (scic->power_control.requesters[i] != NULL) {
				if (scic->power_control.phys_granted_power <
				    scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
					sci_phy = scic->power_control.requesters[i];
					scic->power_control.requesters[i] = NULL;
					scic->power_control.phys_waiting--;
					scic->power_control.phys_granted_power++;
					scic_sds_phy_consume_power_handler(sci_phy);
				} else {
					break;
				}
			}
		}

		/*
		 * It doesn't matter if the power list is empty, we need to start the
		 * timer in case another phy becomes ready.
		 */
		scic_sds_controller_power_control_timer_start(scic);
	}
}

/**
 * This method inserts the phy in the stagger spinup control queue.
 * @scic: controller owning the power-control queue
 * @sci_phy: the phy requesting power; granted immediately if the
 *	spin-up limit permits, otherwise queued until the timer fires.
 */
void scic_sds_controller_power_control_queue_insert(
	struct scic_sds_controller *scic,
	struct scic_sds_phy *sci_phy)
{
	BUG_ON(sci_phy == NULL);

	if (scic->power_control.phys_granted_power <
	    scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
		scic->power_control.phys_granted_power++;
		scic_sds_phy_consume_power_handler(sci_phy);

		/*
		 * stop and start the power_control timer. When the timer fires, the
		 * no_of_phys_granted_power will be set to 0
		 */
		scic_sds_controller_power_control_timer_restart(scic);
	} else {
		/* Add the phy in the waiting list */
		scic->power_control.requesters[sci_phy->phy_index] = sci_phy;
		scic->power_control.phys_waiting++;
	}
}

/**
 * This method removes the phy from the stagger spinup control queue.
 * @scic: controller owning the power-control queue
 * @sci_phy: the phy to drop from the waiting list (no-op on the
 *	waiting count if it was not queued)
 */
void scic_sds_controller_power_control_queue_remove(
	struct scic_sds_controller *scic,
	struct scic_sds_phy *sci_phy)
{
	BUG_ON(sci_phy == NULL);

	if (scic->power_control.requesters[sci_phy->phy_index] != NULL) {
		scic->power_control.phys_waiting--;
	}

	scic->power_control.requesters[sci_phy->phy_index] = NULL;
}

/* Settle delay (us) applied after each AFE register write. */
#define AFE_REGISTER_WRITE_DELAY 10

/* Initialize the AFE for this phy index. We need to read the AFE setup from
 * the OEM parameters.
 *
 * NOTE(review): this is an order-dependent magic-register sequence keyed
 * on silicon revision (is_a0/is_a2/is_b0); the values and delays come
 * from the hardware specification and must not be reordered.
 */
static void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic)
{
	const struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
	u32 afe_status;
	u32 phy_id;

	/* Clear DFX Status registers */
	writel(0x0081000f, &scic->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);

	if (is_b0()) {
		/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
		 * Timer, PM Stagger Timer */
		writel(0x0007BFFF, &scic->scu_registers->afe.afe_pmsn_master_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Configure bias currents to normal */
	if (is_a0())
		writel(0x00005500, &scic->scu_registers->afe.afe_bias_control);
	else if (is_a2())
		writel(0x00005A00, &scic->scu_registers->afe.afe_bias_control);
	else if (is_b0())
		writel(0x00005F00, &scic->scu_registers->afe.afe_bias_control);

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Enable PLL */
	if (is_b0())
		writel(0x80040A08, &scic->scu_registers->afe.afe_pll_control0);
	else
		writel(0x80040908, &scic->scu_registers->afe.afe_pll_control0);

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Wait for the PLL to lock */
	do {
		afe_status = readl(&scic->scu_registers->afe.afe_common_block_status);
		udelay(AFE_REGISTER_WRITE_DELAY);
	} while ((afe_status & 0x00001000) == 0);

	if (is_a0() || is_a2()) {
		/* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
		writel(0x7bcc96ad, &scic->scu_registers->afe.afe_pmsn_master_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
		const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];

		if (is_b0()) {
			/* Configure transmitter SSC parameters */
			writel(0x00030000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		} else {
			/*
			 * All defaults, except the Receive Word Alignament/Comma Detect
			 * Enable....(0xe800) */
			writel(0x00004512, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x0050100F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
		 * & increase TX int & ext bias 20%....(0xe85c) */
		if (is_a0())
			writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		else if (is_a2())
			writel(0x000003F0, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		else {
			/* Power down TX and RX (PWRDNTX and PWRDNRX) */
			writel(0x000003d7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
			 * & increase TX int & ext bias 20%....(0xe85c) */
			writel(0x000003d4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		}
		udelay(AFE_REGISTER_WRITE_DELAY);

		if (is_a0() || is_a2()) {
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
		 * RDD=0x0(RX Detect Enabled) ....(0xe800) */
		writel(0x00004100, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		/* Leave DFE/FFE on */
		if (is_a0())
			writel(0x3F09983F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
		else if (is_a2())
			writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
		else {
			writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
		}
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control0,
			&scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control1,
			&scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control2,
			&scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control3,
			&scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Transfer control to the PEs */
	writel(0x00010f00, &scic->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);
}

/*
 * Select the controller's memory-descriptor sizing: SCI_MODE_SPEED
 * (maximum entries, performance) or SCI_MODE_SIZE (minimum entries,
 * smallest footprint).  Only valid while initializing/initialized.
 */
static enum sci_status scic_controller_set_mode(struct scic_sds_controller *scic,
				enum sci_controller_mode operating_mode)
{
	enum sci_status status = SCI_SUCCESS;

	if ((scic->state_machine.current_state_id ==
				SCI_BASE_CONTROLLER_STATE_INITIALIZING) ||
	    (scic->state_machine.current_state_id ==
				SCI_BASE_CONTROLLER_STATE_INITIALIZED)) {
		switch (operating_mode) {
		case SCI_MODE_SPEED:
			scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES;
			scic->task_context_entries = SCU_IO_REQUEST_COUNT;
			scic->uf_control.buffers.count =
				SCU_UNSOLICITED_FRAME_COUNT;
			scic->completion_event_entries = SCU_EVENT_COUNT;
			scic->completion_queue_entries =
				SCU_COMPLETION_QUEUE_COUNT;
			break;

		case SCI_MODE_SIZE:
			scic->remote_node_entries = SCI_MIN_REMOTE_DEVICES;
			scic->task_context_entries = SCI_MIN_IO_REQUESTS;
			scic->uf_control.buffers.count =
				SCU_MIN_UNSOLICITED_FRAMES;
			scic->completion_event_entries = SCU_MIN_EVENTS;
			scic->completion_queue_entries =
				SCU_MIN_COMPLETION_QUEUE_ENTRIES;
			break;

		default:
			status = SCI_FAILURE_INVALID_PARAMETER_VALUE;
			break;
		}
	} else
		status = SCI_FAILURE_INVALID_STATE;

	return status;
}

/* Create the power-control timer and clear the spin-up wait queue. */
static void scic_sds_controller_initialize_power_control(struct scic_sds_controller *scic)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	scic->power_control.timer = isci_timer_create(ihost,
						      scic,
						      scic_sds_controller_power_control_timer_handler);

	memset(scic->power_control.requesters, 0,
	       sizeof(scic->power_control.requesters));

	scic->power_control.phys_waiting = 0;
	scic->power_control.phys_granted_power = 0;
}

/*
 * Bring the controller from RESET to INITIALIZED: create timers, program
 * the AFE, take the hardware out of reset, read back the silicon's real
 * capacities, enable PCI relaxed ordering, and initialize phys/ports.
 * Each stage runs only if the previous one succeeded (result gating).
 */
static enum sci_status scic_controller_initialize(struct scic_sds_controller *scic)
{
	struct sci_base_state_machine *sm = &scic->state_machine;
	enum sci_status result = SCI_SUCCESS;
	struct isci_host *ihost = scic_to_ihost(scic);
	u32 index, state;

	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_RESET) {
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller initialize operation requested "
			 "in invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_INITIALIZING);

	scic->timeout_timer = isci_timer_create(ihost, scic,
						scic_sds_controller_timeout_handler);

	scic_sds_controller_initialize_phy_startup(scic);

	scic_sds_controller_initialize_power_control(scic);

	/*
	 * There is nothing to do here for B0 since we do not have to
	 * program the AFE registers.
	 * / @todo The AFE settings are supposed to be correct for the B0 but
	 * /       presently they seem to be wrong. */
	scic_sds_controller_afe_initialization(scic);

	if (result == SCI_SUCCESS) {
		u32 status;
		u32 terminate_loop;

		/* Take the hardware out of reset */
		writel(0, &scic->smu_registers->soft_reset_control);

		/*
		 * / @todo Provide meaningful error code for hardware failure
		 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
		result = SCI_FAILURE;
		terminate_loop = 100;

		while (terminate_loop-- && (result != SCI_SUCCESS)) {
			/* Loop until the hardware reports success */
			udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
			status = readl(&scic->smu_registers->control_status);

			if ((status & SCU_RAM_INIT_COMPLETED) ==
					SCU_RAM_INIT_COMPLETED)
				result = SCI_SUCCESS;
		}
	}

	if (result == SCI_SUCCESS) {
		u32 max_supported_ports;
		u32 max_supported_devices;
		u32 max_supported_io_requests;
		u32 device_context_capacity;

		/*
		 * Determine what are the actual device capacities that the
		 * hardware will support */
		device_context_capacity =
			readl(&scic->smu_registers->device_context_capacity);

		max_supported_ports = smu_dcc_get_max_ports(device_context_capacity);
		max_supported_devices = smu_dcc_get_max_remote_node_context(device_context_capacity);
		max_supported_io_requests = smu_dcc_get_max_task_context(device_context_capacity);

		/*
		 * Make all PEs that are unassigned match up with the
		 * logical ports
		 */
		for (index = 0; index < max_supported_ports; index++) {
			struct scu_port_task_scheduler_group_registers __iomem
				*ptsg = &scic->scu_registers->peg0.ptsg;

			writel(index, &ptsg->protocol_engine[index]);
		}

		/* Record the smaller of the two capacity values */
		scic->logical_port_entries =
			min(max_supported_ports, scic->logical_port_entries);

		scic->task_context_entries =
			min(max_supported_io_requests,
			    scic->task_context_entries);

		scic->remote_node_entries =
			min(max_supported_devices, scic->remote_node_entries);

		/*
		 * Now that we have the correct hardware reported minimum values
		 * build the MDL for the controller.  Default to a performance
		 * configuration.
		 */
		scic_controller_set_mode(scic, SCI_MODE_SPEED);
	}

	/* Initialize hardware PCI Relaxed ordering in DMA engines */
	if (result == SCI_SUCCESS) {
		u32 dma_configuration;

		/* Configure the payload DMA */
		dma_configuration =
			readl(&scic->scu_registers->sdma.pdma_configuration);
		dma_configuration |=
			SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
		writel(dma_configuration,
			&scic->scu_registers->sdma.pdma_configuration);

		/* Configure the control DMA */
		dma_configuration =
			readl(&scic->scu_registers->sdma.cdma_configuration);
		dma_configuration |=
			SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
		writel(dma_configuration,
			&scic->scu_registers->sdma.cdma_configuration);
	}

	/*
	 * Initialize the PHYs before the PORTs because the PHY registers
	 * are accessed during the port initialization.
	 */
	if (result == SCI_SUCCESS) {
		/* Initialize the phys */
		for (index = 0;
		     (result == SCI_SUCCESS) && (index < SCI_MAX_PHYS);
		     index++) {
			result = scic_sds_phy_initialize(
				&ihost->phys[index].sci,
				&scic->scu_registers->peg0.pe[index].tl,
				&scic->scu_registers->peg0.pe[index].ll);
		}
	}

	if (result == SCI_SUCCESS) {
		/* Initialize the logical ports */
		for (index = 0;
		     (index < scic->logical_port_entries) &&
		     (result == SCI_SUCCESS);
		     index++) {
			result = scic_sds_port_initialize(
				&ihost->ports[index].sci,
				&scic->scu_registers->peg0.ptsg.port[index],
				&scic->scu_registers->peg0.ptsg.protocol_engine,
				&scic->scu_registers->peg0.viit[index]);
		}
	}

	if (result == SCI_SUCCESS)
		result = scic_sds_port_configuration_agent_initialize(
				scic,
				&scic->port_agent);

	/* Advance the controller state machine */
	if (result == SCI_SUCCESS)
		state = SCI_BASE_CONTROLLER_STATE_INITIALIZED;
	else
		state = SCI_BASE_CONTROLLER_STATE_FAILED;
	sci_base_state_machine_change_state(sm, state);

	return result;
}

/*
 * Validate and install new user parameters.  Only permitted before the
 * controller is started (RESET/INITIALIZING/INITIALIZED); returns
 * SCI_FAILURE_INVALID_PARAMETER_VALUE on any out-of-range field.
 */
static enum sci_status scic_user_parameters_set(
	struct scic_sds_controller *scic,
	union scic_user_parameters *scic_parms)
{
	u32 state = scic->state_machine.current_state_id;

	if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
	    state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
	    state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
		u16 index;

		/*
		 * Validate the user parameters.  If they are not legal, then
		 * return a failure.
		 */
		for (index = 0; index < SCI_MAX_PHYS; index++) {
			struct sci_phy_user_params *user_phy;

			user_phy = &scic_parms->sds1.phys[index];

			if (!((user_phy->max_speed_generation <=
						SCIC_SDS_PARM_MAX_SPEED) &&
			      (user_phy->max_speed_generation >
						SCIC_SDS_PARM_NO_SPEED)))
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;

			if (user_phy->in_connection_align_insertion_frequency <
					3)
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;

			if ((user_phy->in_connection_align_insertion_frequency <
					3) ||
			    (user_phy->align_insertion_frequency == 0) ||
			    (user_phy->
				notify_enable_spin_up_insertion_frequency ==
					0))
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
		}

		if ((scic_parms->sds1.stp_inactivity_timeout == 0) ||
		    (scic_parms->sds1.ssp_inactivity_timeout == 0) ||
		    (scic_parms->sds1.stp_max_occupancy_timeout == 0) ||
		    (scic_parms->sds1.ssp_max_occupancy_timeout == 0) ||
		    (scic_parms->sds1.no_outbound_task_timeout == 0))
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;

		memcpy(&scic->user_parameters, scic_parms, sizeof(*scic_parms));

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INVALID_STATE;
}

/*
 * Allocate the DMA-coherent tables the silicon needs (completion queue,
 * remote node contexts, task contexts, unsolicited frames) and program
 * their bus addresses into the SMU/SCU registers.  Managed (devm)
 * allocations: freed automatically on device teardown.
 */
static int scic_controller_mem_init(struct scic_sds_controller *scic)
{
	struct device *dev = scic_to_dev(scic);
	dma_addr_t dma_handle;
	enum sci_status result;

	scic->completion_queue = dmam_alloc_coherent(dev,
			scic->completion_queue_entries * sizeof(u32),
			&dma_handle, GFP_KERNEL);
	if (!scic->completion_queue)
		return -ENOMEM;

	writel(lower_32_bits(dma_handle),
		&scic->smu_registers->completion_queue_lower);
	writel(upper_32_bits(dma_handle),
		&scic->smu_registers->completion_queue_upper);

	scic->remote_node_context_table = dmam_alloc_coherent(dev,
			scic->remote_node_entries *
				sizeof(union scu_remote_node_context),
			&dma_handle, GFP_KERNEL);
	if (!scic->remote_node_context_table)
		return -ENOMEM;

	writel(lower_32_bits(dma_handle),
		&scic->smu_registers->remote_node_context_lower);
	writel(upper_32_bits(dma_handle),
		&scic->smu_registers->remote_node_context_upper);

	scic->task_context_table = dmam_alloc_coherent(dev,
			scic->task_context_entries *
				sizeof(struct scu_task_context),
			&dma_handle, GFP_KERNEL);
	if (!scic->task_context_table)
		return -ENOMEM;

	writel(lower_32_bits(dma_handle),
		&scic->smu_registers->host_task_table_lower);
	writel(upper_32_bits(dma_handle),
		&scic->smu_registers->host_task_table_upper);

	result = scic_sds_unsolicited_frame_control_construct(scic);
	if (result)
		return result;

	/*
	 * Inform the silicon as to the location of the UF headers and
	 * address table.
	 */
	writel(lower_32_bits(scic->uf_control.headers.physical_address),
		&scic->scu_registers->sdma.uf_header_base_address_lower);
	writel(upper_32_bits(scic->uf_control.headers.physical_address),
		&scic->scu_registers->sdma.uf_header_base_address_upper);

	writel(lower_32_bits(scic->uf_control.address_table.physical_address),
		&scic->scu_registers->sdma.uf_address_table_lower);
	writel(upper_32_bits(scic->uf_control.address_table.physical_address),
		&scic->scu_registers->sdma.uf_address_table_upper);

	return 0;
}

int isci_host_init(struct isci_host *isci_host)
	/* NOTE(review): body unchanged by this hunk and elided here */

	return 0;
}

/* Dispatch a phy link-up event to the port agent per controller state. */
void scic_sds_controller_link_up(struct scic_sds_controller *scic,
		struct scic_sds_port *port, struct scic_sds_phy *phy)
{
	switch (scic->state_machine.current_state_id) {
	case SCI_BASE_CONTROLLER_STATE_STARTING:
		scic_sds_controller_phy_timer_stop(scic);
		scic->port_agent.link_up_handler(scic, &scic->port_agent,
						 port, phy);
		scic_sds_controller_start_next_phy(scic);
		break;
	case SCI_BASE_CONTROLLER_STATE_READY:
		scic->port_agent.link_up_handler(scic, &scic->port_agent,
						 port, phy);
		break;
	default:
		dev_dbg(scic_to_dev(scic),
			"%s: SCIC Controller linkup event from phy %d in "
			"unexpected state %d\n", __func__, phy->phy_index,
			scic->state_machine.current_state_id);
	}
}

/* Dispatch a phy link-down event to the port agent per controller state. */
void scic_sds_controller_link_down(struct scic_sds_controller *scic,
		struct scic_sds_port *port, struct scic_sds_phy *phy)
{
	switch (scic->state_machine.current_state_id) {
	case SCI_BASE_CONTROLLER_STATE_STARTING:
	case SCI_BASE_CONTROLLER_STATE_READY:
		scic->port_agent.link_down_handler(scic, &scic->port_agent,
						   port, phy);
		break;
	default:
		dev_dbg(scic_to_dev(scic),
			"%s: SCIC Controller linkdown event from 
phy %d in "
			"unexpected state %d\n",
			__func__,
			phy->phy_index,
			scic->state_machine.current_state_id);
	}
}

/**
 * This is a helper method to determine if any remote devices on this
 * controller are still in the stopping state.
 *
 */
static bool scic_sds_controller_has_remote_devices_stopping(
	struct scic_sds_controller *controller)
{
	u32 index;

	for (index = 0; index < controller->remote_node_entries; index++) {
		if ((controller->device_table[index] != NULL) &&
		    (controller->device_table[index]->state_machine.current_state_id
				== SCI_BASE_REMOTE_DEVICE_STATE_STOPPING))
			return true;
	}

	return false;
}

/**
 * This method is called by the remote device to inform the controller
 * object that the remote device has stopped.
 *
 * The controller only advances to STOPPED once the last stopping device
 * has reported in; events outside the STOPPING state are logged and
 * ignored.
 */
void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic,
					       struct scic_sds_remote_device *sci_dev)
{
	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_STOPPING) {
		dev_dbg(scic_to_dev(scic),
			"SCIC Controller 0x%p remote device stopped event "
			"from device 0x%p in unexpected state %d\n",
			scic, sci_dev,
			scic->state_machine.current_state_id);
		return;
	}

	if (!scic_sds_controller_has_remote_devices_stopping(scic)) {
		sci_base_state_machine_change_state(&scic->state_machine,
				SCI_BASE_CONTROLLER_STATE_STOPPED);
	}
}

/**
 * This method will write to the SCU PCP register the request value. The method
 * is used to suspend/resume ports, devices, and phys.
 * @scic: the controller whose post context port register is written
 * @request: the raw post context command value
 */
void scic_sds_controller_post_request(
	struct scic_sds_controller *scic,
	u32 request)
{
	dev_dbg(scic_to_dev(scic),
		"%s: SCIC Controller 0x%p post request 0x%08x\n",
		__func__,
		scic,
		request);

	writel(request, &scic->smu_registers->post_context_port);
}

/**
 * This method will copy the soft copy of the task context into the physical
 * memory accessible by the controller.
 * @scic: This parameter specifies the controller for which to copy
 *    the task context.
 * @sci_req: This parameter specifies the request for which the task
 *    context is being copied.
 *
 * After this call is made the SCIC_SDS_IO_REQUEST object will always point to
 * the physical memory version of the task context. Thus, all subsequent
 * updates to the task context are performed in the TC table (i.e. DMAable
 * memory). none
 */
void scic_sds_controller_copy_task_context(
	struct scic_sds_controller *scic,
	struct scic_sds_request *sci_req)
{
	struct scu_task_context *task_context_buffer;

	task_context_buffer = scic_sds_controller_get_task_context_buffer(
					scic, sci_req->io_tag);

	/* Only the portion up to sgl_snapshot_ac is copied; the SGL
	 * snapshot area is left untouched. */
	memcpy(task_context_buffer,
	       sci_req->task_context_buffer,
	       offsetof(struct scu_task_context, sgl_snapshot_ac));

	/*
	 * Now that the soft copy of the TC has been copied into the TC
	 * table accessible by the silicon.  Thus, any further changes to
	 * the TC (e.g. TC termination) occur in the appropriate location. */
	sci_req->task_context_buffer = task_context_buffer;
}

/**
 * This method returns the task context buffer for the given io tag.
 * @scic: the controller owning the task context table
 * @io_tag: the tag whose index selects the entry
 *
 * Returns NULL when the tag's index is outside the configured table.
 */
struct scu_task_context *scic_sds_controller_get_task_context_buffer(
	struct scic_sds_controller *scic,
	u16 io_tag
	) {
	u16 task_index = scic_sds_io_tag_get_index(io_tag);

	if (task_index < scic->task_context_entries) {
		return &scic->task_context_table[task_index];
	}

	return NULL;
}

/*
 * Look up an active request by IO tag; the tag's sequence number must
 * match the table's current sequence for the index (stale tags return
 * NULL).
 */
struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic,
					     u16 io_tag)
{
	u16 task_index;
	u16 task_sequence;

	task_index = scic_sds_io_tag_get_index(io_tag);

	if (task_index < scic->task_context_entries) {
		if (scic->io_request_table[task_index] != NULL) {
			task_sequence = scic_sds_io_tag_get_sequence(io_tag);

			if (task_sequence == scic->io_request_sequence[task_index]) {
				return scic->io_request_table[task_index];
			}
		}
	}

	return NULL;
}

/**
 * This method allocates remote node index and the reserves the remote node
 * context space for use. This method can fail if there are no more remote
 * node index available.
 * @scic: This is the controller object which contains the set of
 *    free remote node ids
 * @sci_dev: This is the device object which is requesting the a remote node
 *    id
 * @node_id: This is the remote node id that is assigned to the device if one
 *    is available
 *
 * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no available
 * remote node index available.
 */
enum sci_status scic_sds_controller_allocate_remote_node_context(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev,
	u16 *node_id)
{
	u16 node_index;
	u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);

	node_index = scic_sds_remote_node_table_allocate_remote_node(
			&scic->available_remote_nodes, remote_node_count
			);

	if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
		scic->device_table[node_index] = sci_dev;

		*node_id = node_index;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}

/**
 * This method frees the remote node index back to the available pool.  Once
 * this is done the remote node context buffer is no longer valid and can
 * not be used.
 * @scic: the controller owning the remote node table
 * @sci_dev: the device releasing its node id (must match the table entry)
 * @node_id: the node id being released
 */
void scic_sds_controller_free_remote_node_context(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev,
	u16 node_id)
{
	u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);

	if (scic->device_table[node_id] == sci_dev) {
		scic->device_table[node_id] = NULL;

		scic_sds_remote_node_table_release_remote_node_index(
			&scic->available_remote_nodes, remote_node_count, node_id
			);
	}
}

/**
 * This method returns the union scu_remote_node_context for the specified
 * remote node id.
 * @scic: the controller owning the remote node context table
 * @node_id: the node id to look up
 *
 * Returns NULL if the id is out of range or currently unassigned.
 */
union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
	struct scic_sds_controller *scic,
	u16 node_id
	) {
	if (
		(node_id < scic->remote_node_entries)
		&& (scic->device_table[node_id] != NULL)
		) {
		return &scic->remote_node_context_table[node_id];
	}

	return NULL;
}

/**
 *
 * @response_buffer: This is the buffer into which the D2H register FIS will be
 *    constructed.
 * @frame_header: This is the frame header returned by the hardware.
 * @frame_buffer: This is the frame buffer returned by the hardware.
 *
 * This method will combine the frame header and frame buffer to create a SATA
 * D2H register FIS none
 */
void scic_sds_controller_copy_sata_response(
	void *response_buffer,
	void *frame_header,
	void *frame_buffer)
{
	/* First dword of the FIS comes from the header; the remainder of
	 * the dev_to_host_fis comes from the frame buffer. */
	memcpy(response_buffer, frame_header, sizeof(u32));

	memcpy(response_buffer + sizeof(u32),
	       frame_buffer,
	       sizeof(struct dev_to_host_fis) - sizeof(u32));
}

/**
 * This method releases the frame once this is done the frame is available for
 * re-use by the hardware.  The data contained in the frame header and frame
 * buffer is no longer valid.  The UF queue get pointer is only updated if UF
 * control indicates this is appropriate.
 * @scic: the controller owning the unsolicited frame queue
 * @frame_index: the frame to return to the hardware
 */
void scic_sds_controller_release_frame(
	struct scic_sds_controller *scic,
	u32 frame_index)
{
	if (scic_sds_unsolicited_frame_control_release_frame(
		    &scic->uf_control, frame_index) == true)
		writel(scic->uf_control.get,
			&scic->scu_registers->sdma.unsolicited_frame_get_pointer);
}

/**
 * scic_controller_start_io() - This method is called by the SCI user to
 *    send/start an IO request. If the method invocation is successful, then
 *    the IO request has been queued to the hardware for processing.
 * @controller: the handle to the controller object for which to start an IO
 *    request.
 * @remote_device: the handle to the remote device object for which to start an
 *    IO request.
 * @io_request: the handle to the io request object to start.
 * @io_tag: This parameter specifies a previously allocated IO tag that the
 *    user desires to be utilized for this request. This parameter is optional.
 *    The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG as the value
 *    for this parameter.
 *
 * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
 * to ensure that each of the methods that may allocate or free available IO
 * tags are handled in a mutually exclusive manner.  This method is one of said
 * methods requiring proper critical code section protection (e.g. semaphore,
 * spin-lock, etc.). - For SATA, the user is required to manage NCQ tags.  As a
 * result, it is expected the user will have set the NCQ tag field in the host
 * to device register FIS prior to calling this method.  There is also a
 * requirement for the user to call scic_stp_io_set_ncq_tag() prior to invoking
 * the scic_controller_start_io() method.  scic_controller_allocate_tag() for
 * more information on allocating a tag.  Indicate if the controller
 * successfully started the IO request.  SCI_SUCCESS if the IO request was
 * successfully started.  Determine the failure situations and return values.
 */
enum sci_status scic_controller_start_io(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *rdev,
	struct scic_sds_request *req,
	u16 io_tag)
{
	enum sci_status status;

	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_READY) {
		dev_warn(scic_to_dev(scic), "invalid state to start I/O");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = scic_sds_remote_device_start_io(scic, rdev, req);
	if (status != SCI_SUCCESS)
		return status;

	/* Record the request and hand it to the silicon via the post queue. */
	scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;
	scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(req));
	return SCI_SUCCESS;
}

/**
 * scic_controller_terminate_request() - This method is called by the SCI Core
 *    user to terminate an ongoing (i.e. started) core IO request.  This does
 *    not abort the IO request at the target, but rather removes the IO request
 *    from the host controller.
 * @controller: the handle to the controller object for which to terminate a
 *    request.
 * @remote_device: the handle to the remote device object for which to
 *    terminate a request.
 * @request: the handle to the io or task management request object to
 *    terminate.
 *
 * Indicate if the controller successfully began the terminate process for the
 * IO request.  SCI_SUCCESS if the terminate process was successfully started
 * for the request.  Determine the failure situations and return values.
 */
enum sci_status scic_controller_terminate_request(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *rdev,
	struct scic_sds_request *req)
{
	enum sci_status status;

	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_READY) {
		dev_warn(scic_to_dev(scic),
			 "invalid state to terminate request\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = scic_sds_io_request_terminate(req);
	if (status != SCI_SUCCESS)
		return status;

	/*
	 * Utilize the original post context command and or in the POST_TC_ABORT
	 * request sub-type.
	 */
	scic_sds_controller_post_request(scic,
		scic_sds_request_get_post_context(req) |
		SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
	return SCI_SUCCESS;
}

/**
 * scic_controller_complete_io() - This method will perform core specific
 *    completion operations for an IO request.  After this method is invoked,
 *    the user should consider the IO request as invalid until it is properly
 *    reused (i.e. re-constructed).
 * @controller: The handle to the controller object for which to complete the
 *    IO request.
 * @remote_device: The handle to the remote device object for which to complete
 *    the IO request.
 * @io_request: the handle to the io request object to complete.
 *
 * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
 * to ensure that each of the methods that may allocate or free available IO
 * tags are handled in a mutually exclusive manner.  This method is one of said
 * methods requiring proper critical code section protection (e.g. semaphore,
 * spin-lock, etc.).
- If the IO tag for a request was allocated, by the SCI 870 + * Core user, using the scic_controller_allocate_io_tag() method, then it is 871 + * the responsibility of the caller to invoke the scic_controller_free_io_tag() 872 + * method to free the tag (i.e. this method will not free the IO tag). Indicate 873 + * if the controller successfully completed the IO request. SCI_SUCCESS if the 874 + * completion process was successful. 875 + */ 876 + enum sci_status scic_controller_complete_io( 877 + struct scic_sds_controller *scic, 878 + struct scic_sds_remote_device *rdev, 879 + struct scic_sds_request *request) 880 + { 881 + enum sci_status status; 882 + u16 index; 883 + 884 + switch (scic->state_machine.current_state_id) { 885 + case SCI_BASE_CONTROLLER_STATE_STOPPING: 886 + /* XXX: Implement this function */ 887 + return SCI_FAILURE; 888 + case SCI_BASE_CONTROLLER_STATE_READY: 889 + status = scic_sds_remote_device_complete_io(scic, rdev, request); 890 + if (status != SCI_SUCCESS) 891 + return status; 892 + 893 + index = scic_sds_io_tag_get_index(request->io_tag); 894 + scic->io_request_table[index] = NULL; 895 + return SCI_SUCCESS; 896 + default: 897 + dev_warn(scic_to_dev(scic), "invalid state to complete I/O"); 898 + return SCI_FAILURE_INVALID_STATE; 899 + } 900 + 901 + } 902 + 903 + enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req) 904 + { 905 + struct scic_sds_controller *scic = sci_req->owning_controller; 906 + 907 + if (scic->state_machine.current_state_id != 908 + SCI_BASE_CONTROLLER_STATE_READY) { 909 + dev_warn(scic_to_dev(scic), "invalid state to continue I/O"); 910 + return SCI_FAILURE_INVALID_STATE; 911 + } 912 + 913 + scic->io_request_table[scic_sds_io_tag_get_index(sci_req->io_tag)] = sci_req; 914 + scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(sci_req)); 915 + return SCI_SUCCESS; 916 + } 917 + 918 + /** 919 + * scic_controller_start_task() - This method is called by the SCIC user to 920 + * 
send/start a framework task management request. 921 + * @controller: the handle to the controller object for which to start the task 922 + * management request. 923 + * @remote_device: the handle to the remote device object for which to start 924 + * the task management request. 925 + * @task_request: the handle to the task request object to start. 926 + * @io_tag: This parameter specifies a previously allocated IO tag that the 927 + * user desires to be utilized for this request. Note this not the io_tag 928 + * of the request being managed. It is to be utilized for the task request 929 + * itself. This parameter is optional. The user is allowed to supply 930 + * SCI_CONTROLLER_INVALID_IO_TAG as the value for this parameter. 931 + * 932 + * - IO tags are a protected resource. It is incumbent upon the SCI Core user 933 + * to ensure that each of the methods that may allocate or free available IO 934 + * tags are handled in a mutually exclusive manner. This method is one of said 935 + * methods requiring proper critical code section protection (e.g. semaphore, 936 + * spin-lock, etc.). - The user must synchronize this task with completion 937 + * queue processing. If they are not synchronized then it is possible for the 938 + * io requests that are being managed by the task request can complete before 939 + * starting the task request. scic_controller_allocate_tag() for more 940 + * information on allocating a tag. Indicate if the controller successfully 941 + * started the IO request. SCI_TASK_SUCCESS if the task request was 942 + * successfully started. SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT This value is 943 + * returned if there is/are task(s) outstanding that require termination or 944 + * completion before this request can succeed. 
945 + */ 946 + enum sci_task_status scic_controller_start_task( 947 + struct scic_sds_controller *scic, 948 + struct scic_sds_remote_device *rdev, 949 + struct scic_sds_request *req, 950 + u16 task_tag) 951 + { 952 + enum sci_status status; 953 + 954 + if (scic->state_machine.current_state_id != 955 + SCI_BASE_CONTROLLER_STATE_READY) { 956 + dev_warn(scic_to_dev(scic), 957 + "%s: SCIC Controller starting task from invalid " 958 + "state\n", 959 + __func__); 960 + return SCI_TASK_FAILURE_INVALID_STATE; 961 + } 962 + 963 + status = scic_sds_remote_device_start_task(scic, rdev, req); 964 + switch (status) { 965 + case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: 966 + scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req; 967 + 968 + /* 969 + * We will let framework know this task request started successfully, 970 + * although core is still woring on starting the request (to post tc when 971 + * RNC is resumed.) 972 + */ 973 + return SCI_SUCCESS; 974 + case SCI_SUCCESS: 975 + scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req; 976 + 977 + scic_sds_controller_post_request(scic, 978 + scic_sds_request_get_post_context(req)); 979 + break; 980 + default: 981 + break; 982 + } 983 + 984 + return status; 985 + } 986 + 987 + /** 988 + * scic_controller_allocate_io_tag() - This method will allocate a tag from the 989 + * pool of free IO tags. Direct allocation of IO tags by the SCI Core user 990 + * is optional. The scic_controller_start_io() method will allocate an IO 991 + * tag if this method is not utilized and the tag is not supplied to the IO 992 + * construct routine. Direct allocation of IO tags may provide additional 993 + * performance improvements in environments capable of supporting this usage 994 + * model. Additionally, direct allocation of IO tags also provides 995 + * additional flexibility to the SCI Core user. Specifically, the user may 996 + * retain IO tags across the lives of multiple IO requests. 
997 + * @controller: the handle to the controller object for which to allocate the 998 + * tag. 999 + * 1000 + * IO tags are a protected resource. It is incumbent upon the SCI Core user to 1001 + * ensure that each of the methods that may allocate or free available IO tags 1002 + * are handled in a mutually exclusive manner. This method is one of said 1003 + * methods requiring proper critical code section protection (e.g. semaphore, 1004 + * spin-lock, etc.). An unsigned integer representing an available IO tag. 1005 + * SCI_CONTROLLER_INVALID_IO_TAG This value is returned if there are no 1006 + * currently available tags to be allocated. All return other values indicate a 1007 + * legitimate tag. 1008 + */ 1009 + u16 scic_controller_allocate_io_tag( 1010 + struct scic_sds_controller *scic) 1011 + { 1012 + u16 task_context; 1013 + u16 sequence_count; 1014 + 1015 + if (!sci_pool_empty(scic->tci_pool)) { 1016 + sci_pool_get(scic->tci_pool, task_context); 1017 + 1018 + sequence_count = scic->io_request_sequence[task_context]; 1019 + 1020 + return scic_sds_io_tag_construct(sequence_count, task_context); 1021 + } 1022 + 1023 + return SCI_CONTROLLER_INVALID_IO_TAG; 1024 + } 1025 + 1026 + /** 1027 + * scic_controller_free_io_tag() - This method will free an IO tag to the pool 1028 + * of free IO tags. This method provides the SCI Core user more flexibility 1029 + * with regards to IO tags. The user may desire to keep an IO tag after an 1030 + * IO request has completed, because they plan on re-using the tag for a 1031 + * subsequent IO request. This method is only legal if the tag was 1032 + * allocated via scic_controller_allocate_io_tag(). 1033 + * @controller: This parameter specifies the handle to the controller object 1034 + * for which to free/return the tag. 1035 + * @io_tag: This parameter represents the tag to be freed to the pool of 1036 + * available tags. 1037 + * 1038 + * - IO tags are a protected resource. 
It is incumbent upon the SCI Core user 1039 + * to ensure that each of the methods that may allocate or free available IO 1040 + * tags are handled in a mutually exclusive manner. This method is one of said 1041 + * methods requiring proper critical code section protection (e.g. semaphore, 1042 + * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI 1043 + * Core user, using the scic_controller_allocate_io_tag() method, then it is 1044 + * the responsibility of the caller to invoke this method to free the tag. This 1045 + * method returns an indication of whether the tag was successfully put back 1046 + * (freed) to the pool of available tags. SCI_SUCCESS This return value 1047 + * indicates the tag was successfully placed into the pool of available IO 1048 + * tags. SCI_FAILURE_INVALID_IO_TAG This value is returned if the supplied tag 1049 + * is not a valid IO tag value. 1050 + */ 1051 + enum sci_status scic_controller_free_io_tag( 1052 + struct scic_sds_controller *scic, 1053 + u16 io_tag) 1054 + { 1055 + u16 sequence; 1056 + u16 index; 1057 + 1058 + BUG_ON(io_tag == SCI_CONTROLLER_INVALID_IO_TAG); 1059 + 1060 + sequence = scic_sds_io_tag_get_sequence(io_tag); 1061 + index = scic_sds_io_tag_get_index(io_tag); 1062 + 1063 + if (!sci_pool_full(scic->tci_pool)) { 1064 + if (sequence == scic->io_request_sequence[index]) { 1065 + scic_sds_io_sequence_increment( 1066 + scic->io_request_sequence[index]); 1067 + 1068 + sci_pool_put(scic->tci_pool, index); 1069 + 1070 + return SCI_SUCCESS; 1071 + } 1072 + } 1073 + 1074 + return SCI_FAILURE_INVALID_IO_TAG; 1075 + } 1076 + 1077 +
+576 -38
drivers/scsi/isci/host.h
··· 52 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 53 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 54 */ 55 - 56 55 #ifndef _SCI_HOST_H_ 57 56 #define _SCI_HOST_H_ 58 57 59 - #include "scic_sds_controller.h" 58 + #include "scic_config_parameters.h" 60 59 #include "remote_device.h" 61 60 #include "phy.h" 61 + #include "pool.h" 62 + #include "sci_base_state_machine.h" 63 + #include "remote_node_table.h" 64 + #include "scu_registers.h" 65 + #include "scu_unsolicited_frame.h" 66 + #include "scic_sds_unsolicited_frame_control.h" 67 + #include "scic_sds_port_configuration_agent.h" 68 + 69 + struct scic_sds_request; 70 + struct scu_task_context; 71 + 72 + /** 73 + * struct scic_power_control - 74 + * 75 + * This structure defines the fields for managing power control for direct 76 + * attached disk devices. 77 + */ 78 + struct scic_power_control { 79 + /** 80 + * This field is set when the power control timer is running and cleared when 81 + * it is not. 82 + */ 83 + bool timer_started; 84 + 85 + /** 86 + * This field is the handle to the driver timer object. This timer is used to 87 + * control when the directed attached disks can consume power. 88 + */ 89 + void *timer; 90 + 91 + /** 92 + * This field is used to keep track of how many phys are put into the 93 + * requesters field. 94 + */ 95 + u8 phys_waiting; 96 + 97 + /** 98 + * This field is used to keep track of how many phys have been granted to consume power 99 + */ 100 + u8 phys_granted_power; 101 + 102 + /** 103 + * This field is an array of phys that we are waiting on. The phys are direct 104 + * mapped into requesters via struct scic_sds_phy.phy_index 105 + */ 106 + struct scic_sds_phy *requesters[SCI_MAX_PHYS]; 107 + 108 + }; 109 + 110 + /** 111 + * struct scic_sds_controller - 112 + * 113 + * This structure represents the SCU controller object. 
114 + */ 115 + struct scic_sds_controller { 116 + /** 117 + * This field contains the information for the base controller state 118 + * machine. 119 + */ 120 + struct sci_base_state_machine state_machine; 121 + 122 + /** 123 + * This field is the driver timer object handler used to time the controller 124 + * object start and stop requests. 125 + */ 126 + void *timeout_timer; 127 + 128 + /** 129 + * This field contains the user parameters to be utilized for this 130 + * core controller object. 131 + */ 132 + union scic_user_parameters user_parameters; 133 + 134 + /** 135 + * This field contains the OEM parameters to be utilized for this 136 + * core controller object. 137 + */ 138 + union scic_oem_parameters oem_parameters; 139 + 140 + /** 141 + * This field contains the port configuration agent for this controller. 142 + */ 143 + struct scic_sds_port_configuration_agent port_agent; 144 + 145 + /** 146 + * This field is the array of device objects that are currently constructed 147 + * for this controller object. This table is used as a fast lookup of device 148 + * objects that need to handle device completion notifications from the 149 + * hardware. The table is RNi based. 150 + */ 151 + struct scic_sds_remote_device *device_table[SCI_MAX_REMOTE_DEVICES]; 152 + 153 + /** 154 + * This field is the array of IO request objects that are currently active for 155 + * this controller object. This table is used as a fast lookup of the io 156 + * request object that need to handle completion queue notifications. The 157 + * table is TCi based. 158 + */ 159 + struct scic_sds_request *io_request_table[SCI_MAX_IO_REQUESTS]; 160 + 161 + /** 162 + * This field is the free RNi data structure 163 + */ 164 + struct scic_remote_node_table available_remote_nodes; 165 + 166 + /** 167 + * This field is the TCi pool used to manage the task context index. 
168 + */ 169 + SCI_POOL_CREATE(tci_pool, u16, SCI_MAX_IO_REQUESTS); 170 + 171 + /** 172 + * This filed is the struct scic_power_control data used to controll when direct 173 + * attached devices can consume power. 174 + */ 175 + struct scic_power_control power_control; 176 + 177 + /** 178 + * This field is the array of sequence values for the IO Tag fields. Even 179 + * though only 4 bits of the field is used for the sequence the sequence is 16 180 + * bits in size so the sequence can be bitwise or'd with the TCi to build the 181 + * IO Tag value. 182 + */ 183 + u16 io_request_sequence[SCI_MAX_IO_REQUESTS]; 184 + 185 + /** 186 + * This field in the array of sequence values for the RNi. These are used 187 + * to control io request build to io request start operations. The sequence 188 + * value is recorded into an io request when it is built and is checked on 189 + * the io request start operation to make sure that there was not a device 190 + * hot plug between the build and start operation. 191 + */ 192 + u8 remote_device_sequence[SCI_MAX_REMOTE_DEVICES]; 193 + 194 + /** 195 + * This field is a pointer to the memory allocated by the driver for the task 196 + * context table. This data is shared between the hardware and software. 197 + */ 198 + struct scu_task_context *task_context_table; 199 + 200 + /** 201 + * This field is a pointer to the memory allocated by the driver for the 202 + * remote node context table. This table is shared between the hardware and 203 + * software. 204 + */ 205 + union scu_remote_node_context *remote_node_context_table; 206 + 207 + /** 208 + * This field is a pointer to the completion queue. This memory is 209 + * written to by the hardware and read by the software. 210 + */ 211 + u32 *completion_queue; 212 + 213 + /** 214 + * This field is the software copy of the completion queue get pointer. The 215 + * controller object writes this value to the hardware after processing the 216 + * completion entries. 
217 + */ 218 + u32 completion_queue_get; 219 + 220 + /** 221 + * This field is the minimum of the number of hardware supported port entries 222 + * and the software requested port entries. 223 + */ 224 + u32 logical_port_entries; 225 + 226 + /** 227 + * This field is the minimum number of hardware supported completion queue 228 + * entries and the software requested completion queue entries. 229 + */ 230 + u32 completion_queue_entries; 231 + 232 + /** 233 + * This field is the minimum number of hardware supported event entries and 234 + * the software requested event entries. 235 + */ 236 + u32 completion_event_entries; 237 + 238 + /** 239 + * This field is the minimum number of devices supported by the hardware and 240 + * the number of devices requested by the software. 241 + */ 242 + u32 remote_node_entries; 243 + 244 + /** 245 + * This field is the minimum number of IO requests supported by the hardware 246 + * and the number of IO requests requested by the software. 247 + */ 248 + u32 task_context_entries; 249 + 250 + /** 251 + * This object contains all of the unsolicited frame specific 252 + * data utilized by the core controller. 253 + */ 254 + struct scic_sds_unsolicited_frame_control uf_control; 255 + 256 + /* Phy Startup Data */ 257 + /** 258 + * This field is the driver timer handle for controller phy request startup. 259 + * On controller start the controller will start each PHY individually in 260 + * order of phy index. 261 + */ 262 + void *phy_startup_timer; 263 + 264 + /** 265 + * This field is set when the phy_startup_timer is running and is cleared when 266 + * the phy_startup_timer is stopped. 267 + */ 268 + bool phy_startup_timer_pending; 269 + 270 + /** 271 + * This field is the index of the next phy start. It is initialized to 0 and 272 + * increments for each phy index that is started. 273 + */ 274 + u32 next_phy_to_start; 275 + 276 + /** 277 + * This field controlls the invalid link up notifications to the SCI_USER. 
If 278 + * an invalid_link_up notification is reported a bit for the PHY index is set 279 + * so further notifications are not made. Once the PHY object reports link up 280 + * and is made part of a port then this bit for the PHY index is cleared. 281 + */ 282 + u8 invalid_phy_mask; 283 + 284 + /* 285 + * This field saves the current interrupt coalescing number of the controller. 286 + */ 287 + u16 interrupt_coalesce_number; 288 + 289 + /* 290 + * This field saves the current interrupt coalescing timeout value in microseconds. 291 + */ 292 + u32 interrupt_coalesce_timeout; 293 + 294 + /** 295 + * This field is a pointer to the memory mapped register space for the 296 + * struct smu_registers. 297 + */ 298 + struct smu_registers __iomem *smu_registers; 299 + 300 + /** 301 + * This field is a pointer to the memory mapped register space for the 302 + * struct scu_registers. 303 + */ 304 + struct scu_registers __iomem *scu_registers; 305 + 306 + }; 62 307 63 308 struct isci_host { 64 309 struct scic_sds_controller sci; ··· 338 93 }; 339 94 340 95 /** 96 + * enum scic_sds_controller_states - This enumeration depicts all the states 97 + * for the common controller state machine. 98 + */ 99 + enum scic_sds_controller_states { 100 + /** 101 + * Simply the initial state for the base controller state machine. 102 + */ 103 + SCI_BASE_CONTROLLER_STATE_INITIAL = 0, 104 + 105 + /** 106 + * This state indicates that the controller is reset. The memory for 107 + * the controller is in it's initial state, but the controller requires 108 + * initialization. 109 + * This state is entered from the INITIAL state. 110 + * This state is entered from the RESETTING state. 111 + */ 112 + SCI_BASE_CONTROLLER_STATE_RESET, 113 + 114 + /** 115 + * This state is typically an action state that indicates the controller 116 + * is in the process of initialization. In this state no new IO operations 117 + * are permitted. 118 + * This state is entered from the RESET state. 
119 + */ 120 + SCI_BASE_CONTROLLER_STATE_INITIALIZING, 121 + 122 + /** 123 + * This state indicates that the controller has been successfully 124 + * initialized. In this state no new IO operations are permitted. 125 + * This state is entered from the INITIALIZING state. 126 + */ 127 + SCI_BASE_CONTROLLER_STATE_INITIALIZED, 128 + 129 + /** 130 + * This state indicates the the controller is in the process of becoming 131 + * ready (i.e. starting). In this state no new IO operations are permitted. 132 + * This state is entered from the INITIALIZED state. 133 + */ 134 + SCI_BASE_CONTROLLER_STATE_STARTING, 135 + 136 + /** 137 + * This state indicates the controller is now ready. Thus, the user 138 + * is able to perform IO operations on the controller. 139 + * This state is entered from the STARTING state. 140 + */ 141 + SCI_BASE_CONTROLLER_STATE_READY, 142 + 143 + /** 144 + * This state is typically an action state that indicates the controller 145 + * is in the process of resetting. Thus, the user is unable to perform 146 + * IO operations on the controller. A reset is considered destructive in 147 + * most cases. 148 + * This state is entered from the READY state. 149 + * This state is entered from the FAILED state. 150 + * This state is entered from the STOPPED state. 151 + */ 152 + SCI_BASE_CONTROLLER_STATE_RESETTING, 153 + 154 + /** 155 + * This state indicates that the controller is in the process of stopping. 156 + * In this state no new IO operations are permitted, but existing IO 157 + * operations are allowed to complete. 158 + * This state is entered from the READY state. 159 + */ 160 + SCI_BASE_CONTROLLER_STATE_STOPPING, 161 + 162 + /** 163 + * This state indicates that the controller has successfully been stopped. 164 + * In this state no new IO operations are permitted. 165 + * This state is entered from the STOPPING state. 
166 + */ 167 + SCI_BASE_CONTROLLER_STATE_STOPPED, 168 + 169 + /** 170 + * This state indicates that the controller could not successfully be 171 + * initialized. In this state no new IO operations are permitted. 172 + * This state is entered from the INITIALIZING state. 173 + * This state is entered from the STARTING state. 174 + * This state is entered from the STOPPING state. 175 + * This state is entered from the RESETTING state. 176 + */ 177 + SCI_BASE_CONTROLLER_STATE_FAILED, 178 + 179 + SCI_BASE_CONTROLLER_MAX_STATES 180 + 181 + }; 182 + 183 + 184 + 185 + /** 341 186 * struct isci_pci_info - This class represents the pci function containing the 342 187 * controllers. Depending on PCI SKU, there could be up to 2 controllers in 343 188 * the PCI function. ··· 450 115 id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \ 451 116 ihost = to_pci_info(pdev)->hosts[++id]) 452 117 453 - static inline 454 - enum isci_status isci_host_get_state( 455 - struct isci_host *isci_host) 118 + static inline enum isci_status isci_host_get_state(struct isci_host *isci_host) 456 119 { 457 120 return isci_host->status; 458 121 } 459 122 460 - 461 - static inline void isci_host_change_state( 462 - struct isci_host *isci_host, 463 - enum isci_status status) 123 + static inline void isci_host_change_state(struct isci_host *isci_host, 124 + enum isci_status status) 464 125 { 465 126 unsigned long flags; 466 127 ··· 471 140 472 141 } 473 142 474 - static inline int isci_host_can_queue( 475 - struct isci_host *isci_host, 476 - int num) 143 + static inline int isci_host_can_queue(struct isci_host *isci_host, int num) 477 144 { 478 145 int ret = 0; 479 146 unsigned long flags; ··· 492 163 return ret; 493 164 } 494 165 495 - static inline void isci_host_can_dequeue( 496 - struct isci_host *isci_host, 497 - int num) 166 + static inline void isci_host_can_dequeue(struct isci_host *isci_host, int num) 498 167 { 499 168 unsigned long flags; 500 169 ··· 535 208 } 536 209 537 210 /** 538 - * 
isci_host_scan_finished() - 211 + * INCREMENT_QUEUE_GET() - 539 212 * 540 - * This function is one of the SCSI Host Template functions. The SCSI midlayer 541 - * calls this function during a target scan, approx. once every 10 millisecs. 213 + * This macro will increment the specified index to and if the index wraps to 0 214 + * it will toggel the cycle bit. 542 215 */ 543 - int isci_host_scan_finished( 544 - struct Scsi_Host *, 545 - unsigned long); 546 - 216 + #define INCREMENT_QUEUE_GET(index, cycle, entry_count, bit_toggle) \ 217 + { \ 218 + if ((index) + 1 == entry_count) { \ 219 + (index) = 0; \ 220 + (cycle) = (cycle) ^ (bit_toggle); \ 221 + } else { \ 222 + index = index + 1; \ 223 + } \ 224 + } 547 225 548 226 /** 549 - * isci_host_scan_start() - 227 + * scic_sds_controller_get_port_configuration_agent() - 550 228 * 551 - * This function is one of the SCSI Host Template function, called by the SCSI 552 - * mid layer berfore a target scan begins. The core library controller start 553 - * routine is called from here. 229 + * This is a helper macro to get the port configuration agent from the 230 + * controller object. 554 231 */ 555 - void isci_host_scan_start( 556 - struct Scsi_Host *); 232 + #define scic_sds_controller_get_port_configuration_agent(controller) \ 233 + (&(controller)->port_agent) 557 234 558 235 /** 559 - * isci_host_start_complete() - 236 + * scic_sds_controller_get_protocol_engine_group() - 560 237 * 561 - * This function is called by the core library, through the ISCI Module, to 562 - * indicate controller start status. 238 + * This macro returns the protocol engine group for this controller object. 
239 + * Presently we only support protocol engine group 0 so just return that 563 240 */ 564 - void isci_host_start_complete( 565 - struct isci_host *, 566 - enum sci_status); 241 + #define scic_sds_controller_get_protocol_engine_group(controller) 0 567 242 568 - void isci_host_stop_complete( 569 - struct isci_host *isci_host, 570 - enum sci_status completion_status); 243 + /** 244 + * scic_sds_io_tag_construct() - 245 + * 246 + * This macro constructs an IO tag from the sequence and index values. 247 + */ 248 + #define scic_sds_io_tag_construct(sequence, task_index) \ 249 + ((sequence) << 12 | (task_index)) 250 + 251 + /** 252 + * scic_sds_io_tag_get_sequence() - 253 + * 254 + * This macro returns the IO sequence from the IO tag value. 255 + */ 256 + #define scic_sds_io_tag_get_sequence(io_tag) \ 257 + (((io_tag) & 0xF000) >> 12) 258 + 259 + /** 260 + * scic_sds_io_tag_get_index() - 261 + * 262 + * This macro returns the TCi from the io tag value 263 + */ 264 + #define scic_sds_io_tag_get_index(io_tag) \ 265 + ((io_tag) & 0x0FFF) 266 + 267 + /** 268 + * scic_sds_io_sequence_increment() - 269 + * 270 + * This is a helper macro to increment the io sequence count. 
We may find in 271 + * the future that it will be faster to store the sequence count in such a way 272 + * as we dont perform the shift operation to build io tag values so therefore 273 + * need a way to incrment them correctly 274 + */ 275 + #define scic_sds_io_sequence_increment(value) \ 276 + ((value) = (((value) + 1) & 0x000F)) 277 + 278 + /* expander attached sata devices require 3 rnc slots */ 279 + static inline int scic_sds_remote_device_node_count(struct scic_sds_remote_device *sci_dev) 280 + { 281 + struct domain_device *dev = sci_dev_to_domain(sci_dev); 282 + 283 + if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && 284 + !sci_dev->is_direct_attached) 285 + return SCU_STP_REMOTE_NODE_COUNT; 286 + return SCU_SSP_REMOTE_NODE_COUNT; 287 + } 288 + 289 + /** 290 + * scic_sds_controller_set_invalid_phy() - 291 + * 292 + * This macro will set the bit in the invalid phy mask for this controller 293 + * object. This is used to control messages reported for invalid link up 294 + * notifications. 295 + */ 296 + #define scic_sds_controller_set_invalid_phy(controller, phy) \ 297 + ((controller)->invalid_phy_mask |= (1 << (phy)->phy_index)) 298 + 299 + /** 300 + * scic_sds_controller_clear_invalid_phy() - 301 + * 302 + * This macro will clear the bit in the invalid phy mask for this controller 303 + * object. This is used to control messages reported for invalid link up 304 + * notifications. 
305 + */ 306 + #define scic_sds_controller_clear_invalid_phy(controller, phy) \ 307 + ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index)) 308 + 309 + static inline struct device *scic_to_dev(struct scic_sds_controller *scic) 310 + { 311 + return &scic_to_ihost(scic)->pdev->dev; 312 + } 313 + 314 + static inline struct device *sciphy_to_dev(struct scic_sds_phy *sci_phy) 315 + { 316 + struct isci_phy *iphy = sci_phy_to_iphy(sci_phy); 317 + 318 + if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host) 319 + return NULL; 320 + 321 + return &iphy->isci_port->isci_host->pdev->dev; 322 + } 323 + 324 + static inline struct device *sciport_to_dev(struct scic_sds_port *sci_port) 325 + { 326 + struct isci_port *iport = sci_port_to_iport(sci_port); 327 + 328 + if (!iport || !iport->isci_host) 329 + return NULL; 330 + 331 + return &iport->isci_host->pdev->dev; 332 + } 333 + 334 + static inline struct device *scirdev_to_dev(struct scic_sds_remote_device *sci_dev) 335 + { 336 + struct isci_remote_device *idev = 337 + container_of(sci_dev, typeof(*idev), sci); 338 + 339 + if (!idev || !idev->isci_port || !idev->isci_port->isci_host) 340 + return NULL; 341 + 342 + return &idev->isci_port->isci_host->pdev->dev; 343 + } 344 + 345 + enum { 346 + ISCI_SI_REVA0, 347 + ISCI_SI_REVA2, 348 + ISCI_SI_REVB0, 349 + }; 350 + 351 + extern int isci_si_rev; 352 + 353 + static inline bool is_a0(void) 354 + { 355 + return isci_si_rev == ISCI_SI_REVA0; 356 + } 357 + 358 + static inline bool is_a2(void) 359 + { 360 + return isci_si_rev == ISCI_SI_REVA2; 361 + } 362 + 363 + static inline bool is_b0(void) 364 + { 365 + return isci_si_rev > ISCI_SI_REVA2; 366 + } 367 + 368 + void scic_sds_controller_post_request(struct scic_sds_controller *scic, 369 + u32 request); 370 + void scic_sds_controller_release_frame(struct scic_sds_controller *scic, 371 + u32 frame_index); 372 + void scic_sds_controller_copy_sata_response(void *response_buffer, 373 + void *frame_header, 374 + void 
*frame_buffer); 375 + enum sci_status scic_sds_controller_allocate_remote_node_context(struct scic_sds_controller *scic, 376 + struct scic_sds_remote_device *sci_dev, 377 + u16 *node_id); 378 + void scic_sds_controller_free_remote_node_context( 379 + struct scic_sds_controller *scic, 380 + struct scic_sds_remote_device *sci_dev, 381 + u16 node_id); 382 + union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer( 383 + struct scic_sds_controller *scic, 384 + u16 node_id); 385 + 386 + struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, 387 + u16 io_tag); 388 + 389 + struct scu_task_context *scic_sds_controller_get_task_context_buffer( 390 + struct scic_sds_controller *scic, 391 + u16 io_tag); 392 + 393 + void scic_sds_controller_power_control_queue_insert( 394 + struct scic_sds_controller *scic, 395 + struct scic_sds_phy *sci_phy); 396 + 397 + void scic_sds_controller_power_control_queue_remove( 398 + struct scic_sds_controller *scic, 399 + struct scic_sds_phy *sci_phy); 400 + 401 + void scic_sds_controller_link_up( 402 + struct scic_sds_controller *scic, 403 + struct scic_sds_port *sci_port, 404 + struct scic_sds_phy *sci_phy); 405 + 406 + void scic_sds_controller_link_down( 407 + struct scic_sds_controller *scic, 408 + struct scic_sds_port *sci_port, 409 + struct scic_sds_phy *sci_phy); 410 + 411 + void scic_sds_controller_remote_device_stopped( 412 + struct scic_sds_controller *scic, 413 + struct scic_sds_remote_device *sci_dev); 414 + 415 + void scic_sds_controller_copy_task_context( 416 + struct scic_sds_controller *scic, 417 + struct scic_sds_request *this_request); 418 + 419 + void scic_sds_controller_register_setup(struct scic_sds_controller *scic); 420 + 421 + enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req); 422 + int isci_host_scan_finished(struct Scsi_Host *, unsigned long); 423 + void isci_host_scan_start(struct Scsi_Host *); 571 424 572 425 int isci_host_init(struct 
isci_host *); 573 426 ··· 769 262 struct isci_remote_device *, 770 263 enum sci_status); 771 264 772 - #endif /* !defined(_SCI_HOST_H_) */ 265 + void scic_controller_disable_interrupts( 266 + struct scic_sds_controller *scic); 267 + 268 + enum sci_status scic_controller_start_io( 269 + struct scic_sds_controller *scic, 270 + struct scic_sds_remote_device *remote_device, 271 + struct scic_sds_request *io_request, 272 + u16 io_tag); 273 + 274 + enum sci_task_status scic_controller_start_task( 275 + struct scic_sds_controller *scic, 276 + struct scic_sds_remote_device *remote_device, 277 + struct scic_sds_request *task_request, 278 + u16 io_tag); 279 + 280 + enum sci_status scic_controller_terminate_request( 281 + struct scic_sds_controller *scic, 282 + struct scic_sds_remote_device *remote_device, 283 + struct scic_sds_request *request); 284 + 285 + enum sci_status scic_controller_complete_io( 286 + struct scic_sds_controller *scic, 287 + struct scic_sds_remote_device *remote_device, 288 + struct scic_sds_request *io_request); 289 + 290 + u16 scic_controller_allocate_io_tag( 291 + struct scic_sds_controller *scic); 292 + 293 + enum sci_status scic_controller_free_io_tag( 294 + struct scic_sds_controller *scic, 295 + u16 io_tag); 296 + #endif
-2
drivers/scsi/isci/init.c
··· 61 61 #include <asm/string.h> 62 62 #include "isci.h" 63 63 #include "task.h" 64 - #include "sci_environment.h" 65 64 #include "probe_roms.h" 66 - #include "scic_controller.h" 67 65 68 66 static struct scsi_transport_template *isci_transport_template; 69 67
-6
drivers/scsi/isci/isci.h
··· 532 532 irqreturn_t isci_msix_isr(int vec, void *data); 533 533 irqreturn_t isci_intx_isr(int vec, void *data); 534 534 irqreturn_t isci_error_isr(int vec, void *data); 535 - 536 - struct scic_sds_controller; 537 - bool scic_sds_controller_isr(struct scic_sds_controller *scic); 538 - void scic_sds_controller_completion_handler(struct scic_sds_controller *scic); 539 - bool scic_sds_controller_error_isr(struct scic_sds_controller *scic); 540 - void scic_sds_controller_error_handler(struct scic_sds_controller *scic); 541 535 #endif /* __ISCI_H__ */
-8
drivers/scsi/isci/port.c
··· 53 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 54 */ 55 55 56 - /** 57 - * This file contains the isci port implementation. 58 - * 59 - * 60 - */ 61 - 62 - 63 56 #include <linux/workqueue.h> 64 57 #include "isci.h" 65 58 #include "scic_io_request.h" ··· 61 68 #include "scic_port.h" 62 69 #include "port.h" 63 70 #include "request.h" 64 - #include "core/scic_sds_controller.h" 65 71 66 72 static void isci_port_change_state(struct isci_port *iport, enum isci_status status) 67 73 {
-1
drivers/scsi/isci/probe_roms.c
··· 32 32 33 33 #include "isci.h" 34 34 #include "task.h" 35 - #include "sci_environment.h" 36 35 #include "probe_roms.h" 37 36 38 37 struct efi_variable {
-3
drivers/scsi/isci/remote_device.c
··· 57 57 #include "port.h" 58 58 #include "remote_device.h" 59 59 #include "request.h" 60 - #include "scic_controller.h" 61 60 #include "scic_io_request.h" 62 61 #include "scic_phy.h" 63 62 #include "scic_port.h" 64 - #include "scic_sds_controller.h" 65 63 #include "scic_sds_phy.h" 66 64 #include "scic_sds_port.h" 67 65 #include "remote_node_context.h" 68 66 #include "scic_sds_request.h" 69 - #include "sci_environment.h" 70 67 #include "sci_util.h" 71 68 #include "scu_event_codes.h" 72 69 #include "task.h"
+1 -2
drivers/scsi/isci/remote_node_context.c
··· 53 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 54 */ 55 55 56 + #include "host.h" 56 57 #include "sci_base_state_machine.h" 57 - #include "scic_sds_controller.h" 58 58 #include "scic_sds_port.h" 59 59 #include "remote_device.h" 60 60 #include "remote_node_context.h" 61 - #include "sci_environment.h" 62 61 #include "sci_util.h" 63 62 #include "scu_event_codes.h" 64 63 #include "scu_task_context.h"
-1
drivers/scsi/isci/remote_node_table.c
··· 60 60 * 61 61 */ 62 62 #include "sci_util.h" 63 - #include "sci_environment.h" 64 63 #include "remote_node_table.h" 65 64 #include "remote_node_context.h" 66 65
-1
drivers/scsi/isci/request.c
··· 62 62 #include "sata.h" 63 63 #include "scu_completion_codes.h" 64 64 #include "scic_sds_request.h" 65 - #include "scic_controller.h" 66 65 #include "sas.h" 67 66 68 67 static enum sci_status isci_request_ssp_request_construct(
-122
drivers/scsi/isci/sci_environment.h
··· 1 - /* 2 - * This file is provided under a dual BSD/GPLv2 license. When using or 3 - * redistributing this file, you may do so under either license. 4 - * 5 - * GPL LICENSE SUMMARY 6 - * 7 - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 8 - * 9 - * This program is free software; you can redistribute it and/or modify 10 - * it under the terms of version 2 of the GNU General Public License as 11 - * published by the Free Software Foundation. 12 - * 13 - * This program is distributed in the hope that it will be useful, but 14 - * WITHOUT ANY WARRANTY; without even the implied warranty of 15 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 - * General Public License for more details. 17 - * 18 - * You should have received a copy of the GNU General Public License 19 - * along with this program; if not, write to the Free Software 20 - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 21 - * The full GNU General Public License is included in this distribution 22 - * in the file called LICENSE.GPL. 23 - * 24 - * BSD LICENSE 25 - * 26 - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 27 - * All rights reserved. 28 - * 29 - * Redistribution and use in source and binary forms, with or without 30 - * modification, are permitted provided that the following conditions 31 - * are met: 32 - * 33 - * * Redistributions of source code must retain the above copyright 34 - * notice, this list of conditions and the following disclaimer. 35 - * * Redistributions in binary form must reproduce the above copyright 36 - * notice, this list of conditions and the following disclaimer in 37 - * the documentation and/or other materials provided with the 38 - * distribution. 39 - * * Neither the name of Intel Corporation nor the names of its 40 - * contributors may be used to endorse or promote products derived 41 - * from this software without specific prior written permission. 
42 - * 43 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 44 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 45 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 46 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 47 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 48 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 49 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 50 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 51 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 52 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 53 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 - */ 55 - 56 - #ifndef _SCI_ENVIRONMENT_H_ 57 - #define _SCI_ENVIRONMENT_H_ 58 - 59 - #include "host.h" 60 - 61 - 62 - static inline struct device *scic_to_dev(struct scic_sds_controller *scic) 63 - { 64 - return &scic_to_ihost(scic)->pdev->dev; 65 - } 66 - 67 - static inline struct device *sciphy_to_dev(struct scic_sds_phy *sci_phy) 68 - { 69 - struct isci_phy *iphy = sci_phy_to_iphy(sci_phy); 70 - 71 - if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host) 72 - return NULL; 73 - 74 - return &iphy->isci_port->isci_host->pdev->dev; 75 - } 76 - 77 - static inline struct device *sciport_to_dev(struct scic_sds_port *sci_port) 78 - { 79 - struct isci_port *iport = sci_port_to_iport(sci_port); 80 - 81 - if (!iport || !iport->isci_host) 82 - return NULL; 83 - 84 - return &iport->isci_host->pdev->dev; 85 - } 86 - 87 - static inline struct device *scirdev_to_dev( 88 - struct scic_sds_remote_device *sci_dev) 89 - { 90 - struct isci_remote_device *idev = 91 - container_of(sci_dev, typeof(*idev), sci); 92 - 93 - if (!idev || !idev->isci_port || !idev->isci_port->isci_host) 94 - return NULL; 95 - 96 - return 
&idev->isci_port->isci_host->pdev->dev; 97 - } 98 - 99 - enum { 100 - ISCI_SI_REVA0, 101 - ISCI_SI_REVA2, 102 - ISCI_SI_REVB0, 103 - }; 104 - 105 - extern int isci_si_rev; 106 - 107 - static inline bool is_a0(void) 108 - { 109 - return isci_si_rev == ISCI_SI_REVA0; 110 - } 111 - 112 - static inline bool is_a2(void) 113 - { 114 - return isci_si_rev == ISCI_SI_REVA2; 115 - } 116 - 117 - static inline bool is_b0(void) 118 - { 119 - return isci_si_rev > ISCI_SI_REVA2; 120 - } 121 - 122 - #endif
-1
drivers/scsi/isci/task.c
··· 65 65 #include "sata.h" 66 66 #include "task.h" 67 67 #include "scic_sds_request.h" 68 - #include "scic_controller.h" 69 68 #include "timers.h" 70 69 71 70 /**