Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v2.6.29-rc4 565 lines 15 kB view raw
/*
 * Copyright (C) 2005 - 2008 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include "hwlib.h"
#include "bestatus.h"

/*
 * Issue the COMMON_FIRMWARE_CONFIG FWCMD synchronously and copy the
 * response payload into @config.  Runs under pfob->post_lock; the response
 * is delivered through the be_mcc_wrb_response_copy descriptor (rc).
 *
 * Returns BE_SUCCESS (0) on success, BE_STATUS_NO_MCC_WRB when no WRB is
 * available, or the status from be_function_post_mcc_wrb.
 */
int
be_function_internal_query_firmware_config(struct be_function_object *pfob,
	struct BE_FIRMWARE_CONFIG *config)
{
	struct FWCMD_COMMON_FIRMWARE_CONFIG *fwcmd = NULL;
	struct MCC_WRB_AMAP *wrb = NULL;
	int status = 0;
	unsigned long irql;
	struct be_mcc_wrb_response_copy rc;

	spin_lock_irqsave(&pfob->post_lock, irql);

	wrb = be_function_peek_mcc_wrb(pfob);
	if (!wrb) {
		TRACE(DL_ERR, "MCC wrb peek failed.");
		status = BE_STATUS_NO_MCC_WRB;
		goto error;
	}
	/* Prepares an embedded fwcmd, including request/response sizes. */
	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_FIRMWARE_CONFIG);

	/* Describe where the response lives inside the fwcmd and where it
	 * should be copied (config) on completion. */
	rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_FIRMWARE_CONFIG,
					params.response);
	rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_FIRMWARE_CONFIG,
					params.response);
	rc.va = config;

	/* Post the f/w command */
	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL,
					NULL, NULL, NULL, fwcmd, &rc);
error:
	spin_unlock_irqrestore(&pfob->post_lock, irql);
	/* Drive any WRBs that were queued while the post lock was held. */
	if (pfob->pend_queue_driving && pfob->mcc) {
		pfob->pend_queue_driving = 0;
		be_drive_mcc_wrb_queue(pfob->mcc);
	}
	return status;
}

/*
    This allocates and initializes a function object based on the information
    provided by upper layer drivers.

    Returns BE_SUCCESS on success and an appropriate int on failure.
    A function object represents a single BladeEngine (logical) PCI function.
    That is a function object either represents
    the networking side of BladeEngine or the iSCSI side of BladeEngine.

    This routine will also detect and create an appropriate PD object for the
    PCI function as needed.
*/
int
be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
	u8 __iomem *pci_va, u32 function_type,
	struct ring_desc *mailbox, struct be_function_object *pfob)
{
	int status;

	ASSERT(pfob);	/* not a magic assert */
	ASSERT(function_type <= 2);

	TRACE(DL_INFO, "Create function object. type:%s object:0x%p",
	      (function_type == BE_FUNCTION_TYPE_ISCSI ? "iSCSI" :
	       (function_type == BE_FUNCTION_TYPE_NETWORK ? "Network" :
		"Arm")), pfob);

	memset(pfob, 0, sizeof(*pfob));

	pfob->type = function_type;
	pfob->csr_va = csr_va;
	pfob->db_va = db_va;
	pfob->pci_va = pci_va;

	spin_lock_init(&pfob->cq_lock);
	spin_lock_init(&pfob->post_lock);
	spin_lock_init(&pfob->mcc_context_lock);

	/* NOTE(review): the PCI function number is hard-coded to 1 rather
	 * than read from PCI config space -- confirm this is intentional. */
	pfob->pci_function_number = 1;

	pfob->emulate = false;
	TRACE(DL_NOTE, "Non-emulation mode");
	/* Run the BladeEngine power-on self test before any mailbox use. */
	status = be_drive_POST(pfob);
	if (status != BE_SUCCESS) {
		TRACE(DL_ERR, "BladeEngine POST failed.");
		goto error;
	}

	/* Initialize the mailbox */
	status = be_mpu_init_mailbox(pfob, mailbox);
	if (status != BE_SUCCESS) {
		TRACE(DL_ERR, "Failed to initialize mailbox.");
		goto error;
	}
	/*
	 * Cache the firmware config for ASSERTs in hwclib and later
	 * driver queries.
	 */
	status = be_function_internal_query_firmware_config(pfob,
						&pfob->fw_config);
	if (status != BE_SUCCESS) {
		TRACE(DL_ERR, "Failed to query firmware config.");
		goto error;
	}

error:
	if (status != BE_SUCCESS) {
		/* No cleanup necessary */
		TRACE(DL_ERR, "Failed to create function.");
		memset(pfob, 0, sizeof(*pfob));
	}
	return status;
}

/*
    This routine drops the reference count on a given function object. Once
    the reference count falls to zero, the function object is destroyed and all
    resources held are freed.

    FunctionObject - The function object to drop the reference to.
*/
int be_function_object_destroy(struct be_function_object *pfob)
{
	TRACE(DL_INFO, "Destroy pfob. Object:0x%p",
		pfob);

	/* The MCC ring must already have been torn down by the caller. */
	ASSERT(pfob->mcc == NULL);

	return BE_SUCCESS;
}

/*
 * Quiesce a function object before destruction: disable multicast and VLAN
 * filtering, switch the MCC back to mailbox mode, clear any latched CEV
 * interrupt assertions, then destroy the function object itself.
 */
int be_function_cleanup(struct be_function_object *pfob)
{
	int status = 0;
	u32 isr;
	u32 host_intr;
	struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;

	/* Only network functions have multicast filtering to disable. */
	if (pfob->type == BE_FUNCTION_TYPE_NETWORK) {
		status = be_rxf_multicast_config(pfob, false, 0,
						NULL, NULL, NULL, NULL);
		ASSERT(status == BE_SUCCESS);
	}
	/* VLAN */
	status = be_rxf_vlan_config(pfob, false, 0, NULL, NULL, NULL, NULL);
	ASSERT(status == BE_SUCCESS);
	/*
	 * MCC Queue -- Switches to mailbox mode.  May want to destroy
	 * all but the MCC CQ before this call if polling CQ is much better
	 * performance than polling mailbox register.
	 */
	if (pfob->mcc)
		status = be_mcc_ring_destroy(pfob->mcc);
	/*
	 * If interrupts are disabled, clear any CEV interrupt assertions that
	 * fired after we stopped processing EQs.
181 */ 182 ctrl.dw[0] = PCICFG1_READ(pfob, host_timer_int_ctrl); 183 host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR, 184 hostintr, ctrl.dw); 185 if (!host_intr) 186 if (pfob->type == BE_FUNCTION_TYPE_NETWORK) 187 isr = CSR_READ(pfob, cev.isr1); 188 else 189 isr = CSR_READ(pfob, cev.isr0); 190 else 191 /* This should never happen... */ 192 TRACE(DL_ERR, "function_cleanup called with interrupt enabled"); 193 /* Function object destroy */ 194 status = be_function_object_destroy(pfob); 195 ASSERT(status == BE_SUCCESS); 196 197 return status; 198} 199 200 201void * 202be_function_prepare_embedded_fwcmd(struct be_function_object *pfob, 203 struct MCC_WRB_AMAP *wrb, u32 payld_len, u32 request_length, 204 u32 response_length, u32 opcode, u32 subsystem) 205{ 206 struct FWCMD_REQUEST_HEADER *header = NULL; 207 u32 n; 208 209 ASSERT(wrb); 210 211 n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8; 212 AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 1); 213 AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, min(payld_len, n)); 214 header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n); 215 216 header->timeout = 0; 217 header->domain = 0; 218 header->request_length = max(request_length, response_length); 219 header->opcode = opcode; 220 header->subsystem = subsystem; 221 222 return header; 223} 224 225void * 226be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob, 227 struct MCC_WRB_AMAP *wrb, 228 void *fwcmd_va, u64 fwcmd_pa, 229 u32 payld_len, 230 u32 request_length, 231 u32 response_length, 232 u32 opcode, u32 subsystem) 233{ 234 struct FWCMD_REQUEST_HEADER *header = NULL; 235 u32 n; 236 struct MCC_WRB_PAYLOAD_AMAP *plp; 237 238 ASSERT(wrb); 239 ASSERT(fwcmd_va); 240 241 header = (struct FWCMD_REQUEST_HEADER *) fwcmd_va; 242 243 AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 0); 244 AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, payld_len); 245 246 /* 247 * Assume one fragment. 
	 * The caller may override the SGL by
	 * rewriting the 0th length and adding more entries. They
	 * will also need to update the sge_count.
	 */
	AMAP_SET_BITS_PTR(MCC_WRB, sge_count, wrb, 1);

	n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
	plp = (struct MCC_WRB_PAYLOAD_AMAP *)((u8 *)wrb + n);
	/* Fill SGL entry 0 with the external buffer's length and 64-bit
	 * physical address (split into lo/hi halves). */
	AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].length, plp, payld_len);
	AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_lo, plp, (u32)fwcmd_pa);
	AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_hi, plp,
					upper_32_bits(fwcmd_pa));

	header->timeout = 0;
	header->domain = 0;
	header->request_length = max(request_length, response_length);
	header->opcode = opcode;
	header->subsystem = subsystem;

	return header;
}

/*
 * Return a WRB in which to build the next FWCMD: peek the MCC ring when it
 * is up, otherwise use the single WRB embedded in the bootstrap mailbox.
 * The returned WRB is zeroed.  Callers in this file hold pfob->post_lock
 * around the peek/post sequence.
 */
struct MCC_WRB_AMAP *
be_function_peek_mcc_wrb(struct be_function_object *pfob)
{
	struct MCC_WRB_AMAP *wrb = NULL;
	u32 offset;

	if (pfob->mcc)
		wrb = _be_mpu_peek_ring_wrb(pfob->mcc, false);
	else {
		offset = offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8;
		wrb = (struct MCC_WRB_AMAP *) ((u8 *) pfob->mailbox.va +
				offset);
	}

	if (wrb)
		memset(wrb, 0, sizeof(struct MCC_WRB_AMAP));

	return wrb;
}

#if defined(BE_DEBUG)
/*
 * Debug-only helper: record the opcode/subsystem of the FWCMD being posted
 * in the WRB context so completions can be cross-checked.  Compiled out as
 * a no-op macro in non-debug builds (see #else below).
 */
void be_function_debug_print_wrb(struct be_function_object *pfob,
	struct MCC_WRB_AMAP *wrb, void *optional_fwcmd_va,
	struct be_mcc_wrb_context *wrb_context)
{

	struct FWCMD_REQUEST_HEADER *header = NULL;
	u8 embedded;
	u32 n;

	embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, wrb);

	if (embedded) {
		/* Embedded: the header lives in the WRB payload area. */
		n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
		header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);
	} else {
		/* Non-embedded: the header is in the caller's buffer. */
		header = (struct FWCMD_REQUEST_HEADER *) optional_fwcmd_va;
	}

	/* Save the completed count before posting for a debug assert.
	 */

	if (header) {
		wrb_context->opcode = header->opcode;
		wrb_context->subsystem = header->subsystem;

	} else {
		wrb_context->opcode = 0;
		wrb_context->subsystem = 0;
	}
}
#else
#define be_function_debug_print_wrb(a_, b_, c_, d_)
#endif

/*
 * Post a WRB to the MPU, or enqueue it on the MCC backlog when a generic
 * queue context is supplied.
 *
 * pfob   - function object owning the MCC/mailbox.
 * wrb    - WRB prepared by a be_function_prepare_*_fwcmd call.
 * q_ctxt - if non-NULL, the request is queued via
 *          be_function_queue_mcc_wrb instead of being posted now.
 * cb/cb_context - completion callback and its argument.
 * internal_cb/internal_cb_context - hwlib-internal completion callback.
 * optional_fwcmd_va - non-embedded FWCMD virtual address, if any.
 * rc     - optional descriptor of response bytes to copy on completion.
 *
 * Returns BE_SUCCESS, BE_PENDING (queued/async), or an error status such
 * as BE_STATUS_SYSTEM_RESOURCES.
 */
int
be_function_post_mcc_wrb(struct be_function_object *pfob,
		struct MCC_WRB_AMAP *wrb,
		struct be_generic_q_ctxt *q_ctxt,
		mcc_wrb_cqe_callback cb, void *cb_context,
		mcc_wrb_cqe_callback internal_cb,
		void *internal_cb_context, void *optional_fwcmd_va,
		struct be_mcc_wrb_response_copy *rc)
{
	int status;
	struct be_mcc_wrb_context *wrb_context = NULL;
	u64 *p;

	if (q_ctxt) {
		/* Initialize context. */
		q_ctxt->context.internal_cb = internal_cb;
		q_ctxt->context.internal_cb_context = internal_cb_context;
		q_ctxt->context.cb = cb;
		q_ctxt->context.cb_context = cb_context;
		if (rc) {
			q_ctxt->context.copy.length = rc->length;
			q_ctxt->context.copy.fwcmd_offset = rc->fwcmd_offset;
			q_ctxt->context.copy.va = rc->va;
		} else
			q_ctxt->context.copy.length = 0;

		q_ctxt->context.optional_fwcmd_va = optional_fwcmd_va;

		/* Queue this request */
		status = be_function_queue_mcc_wrb(pfob, q_ctxt);

		goto Error;
	}
	/*
	 * Allocate a WRB context struct to hold the callback pointers,
	 * status, etc. This is required if commands complete out of order.
	 */
	wrb_context = _be_mcc_allocate_wrb_context(pfob);
	if (!wrb_context) {
		TRACE(DL_WARN, "Failed to allocate MCC WRB context.");
		status = BE_STATUS_SYSTEM_RESOURCES;
		goto Error;
	}
	/* Initialize context.
	 */
	memset(wrb_context, 0, sizeof(*wrb_context));
	wrb_context->internal_cb = internal_cb;
	wrb_context->internal_cb_context = internal_cb_context;
	wrb_context->cb = cb;
	wrb_context->cb_context = cb_context;
	if (rc) {
		wrb_context->copy.length = rc->length;
		wrb_context->copy.fwcmd_offset = rc->fwcmd_offset;
		wrb_context->copy.va = rc->va;
	} else
		wrb_context->copy.length = 0;
	wrb_context->wrb = wrb;

	/*
	 * Copy the context pointer into the WRB opaque tag field.
	 * Verify assumption of 64-bit tag with a compile time assert.
	 */
	p = (u64 *) ((u8 *)wrb + offsetof(struct BE_MCC_WRB_AMAP, tag)/8);
	*p = (u64)(size_t)wrb_context;

	/* Print info about this FWCMD for debug builds. */
	be_function_debug_print_wrb(pfob, wrb, optional_fwcmd_va, wrb_context);

	/*
	 * issue the WRB to the MPU as appropriate
	 */
	if (pfob->mcc) {
		/*
		 * we're in WRB mode, pass to the mcc layer
		 */
		status = _be_mpu_post_wrb_ring(pfob->mcc, wrb, wrb_context);
	} else {
		/*
		 * we're in mailbox mode
		 */
		status = _be_mpu_post_wrb_mailbox(pfob, wrb, wrb_context);

		/* mailbox mode always completes synchronously */
		ASSERT(status != BE_STATUS_PENDING);
	}

Error:

	return status;
}

/*
 * Issue a COMMON_RING_DESTROY FWCMD for ring @id of @ring_type.  When @cb
 * is supplied the command may complete asynchronously, in which case the
 * return value is BE_PENDING and the callback fires on completion.
 */
int
be_function_ring_destroy(struct be_function_object *pfob,
		u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
		void *cb_context, mcc_wrb_cqe_callback internal_cb,
		void *internal_cb_context)
{

	struct FWCMD_COMMON_RING_DESTROY *fwcmd = NULL;
	struct MCC_WRB_AMAP *wrb = NULL;
	int status = 0;
	unsigned long irql;

	spin_lock_irqsave(&pfob->post_lock, irql);

	TRACE(DL_INFO, "Destroy ring id:%d type:%d", id, ring_type);

	wrb = be_function_peek_mcc_wrb(pfob);
	if (!wrb) {
		ASSERT(wrb);
		TRACE(DL_ERR, "No free MCC WRBs in destroy ring.");
		status = BE_STATUS_NO_MCC_WRB;
		goto Error;
	}
	/* Prepares an
 embedded fwcmd, including request/response sizes. */
	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_RING_DESTROY);

	fwcmd->params.request.id = id;
	fwcmd->params.request.ring_type = ring_type;

	/* Post the f/w command */
	status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, cb_context,
			internal_cb, internal_cb_context, fwcmd, NULL);
	if (status != BE_SUCCESS && status != BE_PENDING) {
		TRACE(DL_ERR, "Ring destroy fwcmd failed. id:%d ring_type:%d",
			id, ring_type);
		goto Error;
	}

Error:
	spin_unlock_irqrestore(&pfob->post_lock, irql);
	/* Drive any WRBs that were queued while the post lock was held. */
	if (pfob->pend_queue_driving && pfob->mcc) {
		pfob->pend_queue_driving = 0;
		be_drive_mcc_wrb_queue(pfob->mcc);
	}
	return status;
}

/*
 * Translate a ring descriptor into a list of per-page physical addresses.
 * Writes at most @max_num entries; each entry covers one PAGE_SIZE chunk
 * starting at rd->pa.
 *
 * NOTE(review): the 64-bit address is byte-swapped as a whole
 * (cpu_to_le64) and then split with (u32)/upper_32_bits.  On a big-endian
 * host this stores the halves swapped relative to splitting first and
 * converting each half -- confirm the intended byte layout of PHYS_ADDR.
 */
void
be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list, u32 max_num)
{
	u32 num_pages = PAGES_SPANNED(rd->va, rd->length);
	u32 i = 0;
	u64 pa = rd->pa;
	__le64 lepa;

	ASSERT(pa_list);
	ASSERT(pa);

	for (i = 0; i < min(num_pages, max_num); i++) {
		lepa = cpu_to_le64(pa);
		pa_list[i].lo = (u32)lepa;
		pa_list[i].hi = upper_32_bits(lepa);
		pa += PAGE_SIZE;
	}
}

/*-----------------------------------------------------------------------------
 * Function: be_function_get_fw_version
 *   Retrieves the firmware version on the adapter. If the callback is
 *   NULL this call executes synchronously. If the callback is not NULL,
 *   the returned status will be BE_PENDING if the command was issued
 *   successfully.
 * pfob -
 * fwv  - Pointer to response buffer if callback is NULL.
 * cb   - Callback function invoked when the FWCMD completes.
 * cb_context - Passed to the callback function.
 * return pend_status - BE_SUCCESS (0) on success.
 *			BE_PENDING (positive value) if the FWCMD
 *			completion is pending. Negative error code on failure.
 *---------------------------------------------------------------------------
 */
int
be_function_get_fw_version(struct be_function_object *pfob,
		struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fwv,
		mcc_wrb_cqe_callback cb, void *cb_context)
{
	int status = BE_SUCCESS;
	struct MCC_WRB_AMAP *wrb = NULL;
	struct FWCMD_COMMON_GET_FW_VERSION *fwcmd = NULL;
	unsigned long irql;
	struct be_mcc_wrb_response_copy rc;

	spin_lock_irqsave(&pfob->post_lock, irql);

	wrb = be_function_peek_mcc_wrb(pfob);
	if (!wrb) {
		TRACE(DL_ERR, "MCC wrb peek failed.");
		status = BE_STATUS_NO_MCC_WRB;
		goto Error;
	}

	/* NOTE(review): this argument check could run before taking the
	 * post lock and peeking a WRB.  Also, when cb is non-NULL but fwv
	 * is NULL, rc.va below is NULL while rc.length is non-zero --
	 * confirm the completion copy path tolerates that. */
	if (!cb && !fwv) {
		TRACE(DL_ERR, "callback and response buffer NULL!");
		status = BE_NOT_OK;
		goto Error;
	}
	/* Prepares an embedded fwcmd, including request/response sizes. */
	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FW_VERSION);

	/* Describe where the response payload lives and where to copy it. */
	rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_GET_FW_VERSION,
					params.response);
	rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_GET_FW_VERSION,
					params.response);
	rc.va = fwv;

	/* Post the f/w command */
	status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb,
				cb_context, NULL, NULL, fwcmd, &rc);

Error:
	spin_unlock_irqrestore(&pfob->post_lock, irql);
	/* Drive any WRBs that were queued while the post lock was held. */
	if (pfob->pend_queue_driving && pfob->mcc) {
		pfob->pend_queue_driving = 0;
		be_drive_mcc_wrb_queue(pfob->mcc);
	}
	return status;
}

/*
 * Append a generic queue context to the MCC backlog for later posting.
 * Only meaningful in ring mode: returns BE_PENDING when queued, or
 * BE_NOT_OK in mailbox mode where no backlog exists.
 */
int
be_function_queue_mcc_wrb(struct be_function_object *pfob,
		struct be_generic_q_ctxt *q_ctxt)
{
	int status;

	ASSERT(q_ctxt);

	/*
	 * issue the WRB to the MPU as appropriate
	 */
	if (pfob->mcc) {

		/* We're in ring mode. Queue this item. */
		pfob->mcc->backlog_length++;
		list_add_tail(&q_ctxt->context.list, &pfob->mcc->backlog);
		status = BE_PENDING;
	} else {
		status = BE_NOT_OK;
	}
	return status;
}