Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/net/ehea/ehea_qmr.h at v2.6.35 (402 lines, 11 kB)
/*
 *  linux/drivers/net/ehea/ehea_qmr.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_QMR_H__
#define __EHEA_QMR_H__

#include "ehea.h"
#include "ehea_hw.h"

/*
 * page size of ehea hardware queues
 */

#define EHEA_PAGESHIFT          12
#define EHEA_PAGESIZE           (1UL << EHEA_PAGESHIFT)
#define EHEA_SECTSIZE           (1UL << 24)
#define EHEA_PAGES_PER_SECTION  (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
#define EHEA_HUGEPAGESHIFT      34
#define EHEA_HUGEPAGE_SIZE      (1UL << EHEA_HUGEPAGESHIFT)
#define EHEA_HUGEPAGE_PFN_MASK  ((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)

#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
#endif

/* Some abbreviations used here:
 *
 * WQE  - Work Queue Entry
 * SWQE - Send Work Queue Entry
 * RWQE - Receive Work Queue Entry
 * CQE  - Completion Queue Entry
 * EQE  - Event Queue Entry
 * MR   - Memory Region
 */

/* Use of WR_ID field for EHEA */
#define EHEA_WR_ID_COUNT   EHEA_BMASK_IBM(0, 19)
#define EHEA_WR_ID_TYPE    EHEA_BMASK_IBM(20, 23)
#define EHEA_SWQE2_TYPE    0x1
#define EHEA_SWQE3_TYPE    0x2
#define EHEA_RWQE2_TYPE    0x3
#define EHEA_RWQE3_TYPE    0x4
#define EHEA_WR_ID_INDEX   EHEA_BMASK_IBM(24, 47)
#define EHEA_WR_ID_REFILL  EHEA_BMASK_IBM(48, 63)
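/*
 * Illustrative sketch, not part of the original header: the driver packs
 * a WQE's 64-bit wr_id out of the fields above with the EHEA_BMASK_SET
 * and EHEA_BMASK_GET helpers from ehea_hw.h.  The two functions below
 * are hypothetical examples, not driver API.
 */
static inline u64 ehea_example_make_wr_id(u64 type, u64 index, u64 count)
{
        /* pack WQE type, queue index and count into one wr_id */
        return EHEA_BMASK_SET(EHEA_WR_ID_TYPE, type)
               | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index)
               | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, count);
}

static inline int ehea_example_is_swqe2(u64 wr_id)
{
        /* pull the type field back out and test for SWQE format 2 */
        return EHEA_BMASK_GET(EHEA_WR_ID_TYPE, wr_id) == EHEA_SWQE2_TYPE;
}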
struct ehea_vsgentry {
        u64 vaddr;
        u32 l_key;
        u32 len;
};

/* maximum number of sg entries allowed in a WQE */
#define EHEA_MAX_WQE_SG_ENTRIES  252
#define SWQE2_MAX_IMM            (0xD0 - 0x30)
#define SWQE3_MAX_IMM            224

/* tx control flags for swqe */
#define EHEA_SWQE_CRC                   0x8000
#define EHEA_SWQE_IP_CHECKSUM           0x4000
#define EHEA_SWQE_TCP_CHECKSUM          0x2000
#define EHEA_SWQE_TSO                   0x1000
#define EHEA_SWQE_SIGNALLED_COMPLETION  0x0800
#define EHEA_SWQE_VLAN_INSERT           0x0400
#define EHEA_SWQE_IMM_DATA_PRESENT      0x0200
#define EHEA_SWQE_DESCRIPTORS_PRESENT   0x0100
#define EHEA_SWQE_WRAP_CTL_REC          0x0080
#define EHEA_SWQE_WRAP_CTL_FORCE        0x0040
#define EHEA_SWQE_BIND                  0x0020
#define EHEA_SWQE_PURGE                 0x0010

/* sizeof(struct ehea_swqe) less the union */
#define SWQE_HEADER_SIZE 32

struct ehea_swqe {
        u64 wr_id;
        u16 tx_control;
        u16 vlan_tag;
        u8 reserved1;
        u8 ip_start;
        u8 ip_end;
        u8 immediate_data_length;
        u8 tcp_offset;
        u8 reserved2;
        u16 tcp_end;
        u8 wrap_tag;
        u8 descriptors;         /* number of valid descriptors in WQE */
        u16 reserved3;
        u16 reserved4;
        u16 mss;
        u32 reserved5;
        union {
                /* Send WQE Format 1 */
                struct {
                        struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
                } no_immediate_data;

                /* Send WQE Format 2 */
                struct {
                        struct ehea_vsgentry sg_entry;
                        /* 0x30 */
                        u8 immediate_data[SWQE2_MAX_IMM];
                        /* 0xd0 */
                        struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
                } immdata_desc __attribute__ ((packed));

                /* Send WQE Format 3 */
                struct {
                        u8 immediate_data[SWQE3_MAX_IMM];
                } immdata_nodesc;
        } u;
};

struct ehea_rwqe {
        u64 wr_id;              /* work request ID */
        u8 reserved1[5];
        u8 data_segments;
        u16 reserved2;
        u64 reserved3;
        u64 reserved4;
        struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
};

#define EHEA_CQE_VLAN_TAG_XTRACT    0x0400

#define EHEA_CQE_TYPE_RQ            0x60
#define EHEA_CQE_STAT_ERR_MASK      0x700F
#define EHEA_CQE_STAT_FAT_ERR_MASK  0xF
#define EHEA_CQE_STAT_ERR_TCP       0x4000
#define EHEA_CQE_STAT_ERR_IP        0x2000
#define EHEA_CQE_STAT_ERR_CRC       0x1000

/* Defines which bad send cqe stati lead to a port reset */
#define EHEA_CQE_STAT_RESET_MASK    0x0002

struct ehea_cqe {
        u64 wr_id;              /* work request ID from WQE */
        u8 type;
        u8 valid;
        u16 status;
        u16 reserved1;
        u16 num_bytes_transfered;
        u16 vlan_tag;
        u16 inet_checksum_value;
        u8 reserved2;
        u8 header_length;
        u16 reserved3;
        u16 page_offset;
        u16 wqe_count;
        u32 qp_token;
        u32 timestamp;
        u32 reserved4;
        u64 reserved5[3];
};

#define EHEA_EQE_VALID           EHEA_BMASK_IBM(0, 0)
#define EHEA_EQE_IS_CQE          EHEA_BMASK_IBM(1, 1)
#define EHEA_EQE_IDENTIFIER      EHEA_BMASK_IBM(2, 7)
#define EHEA_EQE_QP_CQ_NUMBER    EHEA_BMASK_IBM(8, 31)
#define EHEA_EQE_QP_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_CQ_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_KEY             EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_PORT_NUMBER     EHEA_BMASK_IBM(56, 63)
#define EHEA_EQE_EQ_NUMBER       EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_ID           EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_MECH_NUMBER  EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER  EHEA_BMASK_IBM(56, 63)

#define EHEA_AER_RESTYPE_QP  0x8
#define EHEA_AER_RESTYPE_CQ  0x4
#define EHEA_AER_RESTYPE_EQ  0x3

/* Defines which affiliated errors lead to a port reset */
#define EHEA_AER_RESET_MASK   0xFFFFFFFFFEFFFFFFULL
#define EHEA_AERR_RESET_MASK  0xFFFFFFFFFFFFFFFFULL

struct ehea_eqe {
        u64 entry;
};

#define ERROR_DATA_LENGTH  EHEA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE    EHEA_BMASK_IBM(0, 7)

static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
        struct ehea_page *current_page;

        if (q_offset >= queue->queue_length)
                q_offset -= queue->queue_length;
        current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
        return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
}

static inline void *hw_qeit_get(struct hw_queue *queue)
{
        return hw_qeit_calc(queue, queue->current_q_offset);
}

static inline void hw_qeit_inc(struct hw_queue *queue)
{
        queue->current_q_offset += queue->qe_size;
        if (queue->current_q_offset >= queue->queue_length) {
                queue->current_q_offset = 0;
                /* toggle the valid flag */
                queue->toggle_state = (~queue->toggle_state) & 1;
        }
}

static inline void *hw_qeit_get_inc(struct hw_queue *queue)
{
        void *retvalue = hw_qeit_get(queue);
        hw_qeit_inc(queue);
        return retvalue;
}

static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
{
        struct ehea_cqe *retvalue = hw_qeit_get(queue);
        u8 valid = retvalue->valid;
        void *pref;

        if ((valid >> 7) == (queue->toggle_state & 1)) {
                /* this is a good one */
                hw_qeit_inc(queue);
                pref = hw_qeit_calc(queue, queue->current_q_offset);
                prefetch(pref);
                prefetch(pref + 128);
        } else
                retvalue = NULL;
        return retvalue;
}

static inline void *hw_qeit_get_valid(struct hw_queue *queue)
{
        struct ehea_cqe *retvalue = hw_qeit_get(queue);
        void *pref;
        u8 valid;

        pref = hw_qeit_calc(queue, queue->current_q_offset);
        prefetch(pref);
        prefetch(pref + 128);
        prefetch(pref + 256);
        valid = retvalue->valid;
        if (!((valid >> 7) == (queue->toggle_state & 1)))
                retvalue = NULL;
        return retvalue;
}
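/*
 * Illustrative sketch, not part of the original header: entries written
 * by hardware carry a "valid" toggle in their top bit, and hw_qeit_inc()
 * flips queue->toggle_state on every wrap of the ring, so an entry is
 * current exactly while its valid bit matches the toggle state.  A
 * hypothetical consumer loop built on the helpers above:
 */
static inline int ehea_example_drain(struct hw_queue *queue)
{
        struct ehea_cqe *cqe;
        int processed = 0;

        /* hw_qeit_get_inc_valid() returns NULL as soon as the next
         * entry's valid bit disagrees with toggle_state, i.e. once the
         * hardware has written nothing newer.
         */
        while ((cqe = hw_qeit_get_inc_valid(queue)) != NULL) {
                /* ... process completion 'cqe' here ... */
                processed++;
        }
        return processed;
}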
static inline void *hw_qeit_reset(struct hw_queue *queue)
{
        queue->current_q_offset = 0;
        return hw_qeit_get(queue);
}

static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
{
        u64 last_entry_in_q = queue->queue_length - queue->qe_size;
        void *retvalue;

        retvalue = hw_qeit_get(queue);
        queue->current_q_offset += queue->qe_size;
        if (queue->current_q_offset > last_entry_in_q) {
                queue->current_q_offset = 0;
                queue->toggle_state = (~queue->toggle_state) & 1;
        }
        return retvalue;
}

static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
        void *retvalue = hw_qeit_get(queue);
        u32 qe = *(u8 *)retvalue;
        if ((qe >> 7) == (queue->toggle_state & 1))
                hw_qeit_eq_get_inc(queue);
        else
                retvalue = NULL;
        return retvalue;
}

static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
                                                   int rq_nr)
{
        struct hw_queue *queue;

        if (rq_nr == 1)
                queue = &qp->hw_rqueue1;
        else if (rq_nr == 2)
                queue = &qp->hw_rqueue2;
        else
                queue = &qp->hw_rqueue3;

        return hw_qeit_get_inc(queue);
}

static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
                                              int *wqe_index)
{
        struct hw_queue *queue = &my_qp->hw_squeue;
        struct ehea_swqe *wqe_p;

        *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
        wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);

        return wqe_p;
}

static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
{
        iosync();
        ehea_update_sqa(my_qp, 1);
}

static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
{
        struct hw_queue *queue = &qp->hw_rqueue1;

        *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
        return hw_qeit_get_valid(queue);
}

static inline void ehea_inc_cq(struct ehea_cq *cq)
{
        hw_qeit_inc(&cq->hw_queue);
}

static inline void ehea_inc_rq1(struct ehea_qp *qp)
{
        hw_qeit_inc(&qp->hw_rqueue1);
}

static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
{
        return hw_qeit_get_valid(&my_cq->hw_queue);
}
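/*
 * Illustrative sketch, not part of the original header: a completion
 * queue is typically consumed by alternating ehea_poll_cq() and
 * ehea_inc_cq(), roughly as the driver core does; this function and its
 * 'budget' parameter are hypothetical.
 */
static inline int ehea_example_proc_cqes(struct ehea_cq *cq, int budget)
{
        struct ehea_cqe *cqe;
        int done = 0;

        while (done < budget && (cqe = ehea_poll_cq(cq)) != NULL) {
                /* e.g. check cqe->status against EHEA_CQE_STAT_ERR_MASK
                 * and look the finished WQE up via cqe->wr_id ...
                 */
                ehea_inc_cq(cq);        /* advance past the handled entry */
                done++;
        }
        return done;
}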
#define EHEA_CQ_REGISTER_ORIG 0
#define EHEA_EQ_REGISTER_ORIG 0

enum ehea_eq_type {
        EHEA_EQ = 0,            /* event queue */
        EHEA_NEQ                /* notification event queue */
};

struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
                               enum ehea_eq_type type,
                               const u32 length, const u8 eqe_gen);

int ehea_destroy_eq(struct ehea_eq *eq);

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);

struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
                               u64 eq_handle, u32 cq_token);

int ehea_destroy_cq(struct ehea_cq *cq);

struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
                               struct ehea_qp_init_attr *init_attr);

int ehea_destroy_qp(struct ehea_qp *qp);

int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
                 struct ehea_mr *shared_mr);

int ehea_rem_mr(struct ehea_mr *mr);

u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
                    u64 *aer, u64 *aerr);

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_create_busmap(void);
void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);

#endif /* __EHEA_QMR_H__ */
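/*
 * Illustrative sketch, not part of the original header: the usual
 * create/destroy pairing for the resources declared above, with error
 * handling elided.  'adapter', 'length', 'eq_handle', 'num_cqes' and
 * 'cq_token' are hypothetical placeholders.
 *
 *      struct ehea_eq *eq = ehea_create_eq(adapter, EHEA_EQ, length, 1);
 *      struct ehea_cq *cq = ehea_create_cq(adapter, num_cqes,
 *                                          eq_handle, cq_token);
 *      ...
 *      ehea_destroy_cq(cq);
 *      ehea_destroy_eq(eq);
 */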