Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

qeth: remove EDDP

Performance measurements showed EDDP does not lower CPU costs but increases
them. So we remove the EDDP code from the qeth driver.

Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Frank Blaschka and committed by
David S. Miller
64ef8957 f61a0d05

+111 -980
+1 -1
drivers/s390/net/Makefile
··· 8 8 obj-$(CONFIG_SMSGIUCV) += smsgiucv.o 9 9 obj-$(CONFIG_LCS) += lcs.o cu3088.o 10 10 obj-$(CONFIG_CLAW) += claw.o cu3088.o 11 - qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o qeth_core_offl.o 11 + qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o 12 12 obj-$(CONFIG_QETH) += qeth.o 13 13 qeth_l2-y += qeth_l2_main.o 14 14 obj-$(CONFIG_QETH_L2) += qeth_l2.o
+2 -5
drivers/s390/net/qeth_core.h
··· 404 404 /* possible types of qeth large_send support */ 405 405 enum qeth_large_send_types { 406 406 QETH_LARGE_SEND_NO, 407 - QETH_LARGE_SEND_EDDP, 408 407 QETH_LARGE_SEND_TSO, 409 408 }; 410 409 ··· 838 839 int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); 839 840 int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); 840 841 int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, 841 - struct sk_buff *, struct qeth_hdr *, int, 842 - struct qeth_eddp_context *, int, int); 842 + struct sk_buff *, struct qeth_hdr *, int, int, int); 843 843 int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, 844 - struct sk_buff *, struct qeth_hdr *, 845 - int, struct qeth_eddp_context *); 844 + struct sk_buff *, struct qeth_hdr *, int); 846 845 int qeth_core_get_stats_count(struct net_device *); 847 846 void qeth_core_get_ethtool_stats(struct net_device *, 848 847 struct ethtool_stats *, u64 *);
+23 -76
drivers/s390/net/qeth_core_main.c
··· 17 17 #include <linux/errno.h> 18 18 #include <linux/kernel.h> 19 19 #include <linux/ip.h> 20 - #include <linux/ipv6.h> 21 20 #include <linux/tcp.h> 22 21 #include <linux/mii.h> 23 22 #include <linux/kthread.h> ··· 25 26 #include <asm/io.h> 26 27 27 28 #include "qeth_core.h" 28 - #include "qeth_core_offl.h" 29 29 30 30 struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { 31 31 /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */ ··· 283 285 netif_tx_disable(card->dev); 284 286 card->options.large_send = type; 285 287 switch (card->options.large_send) { 286 - case QETH_LARGE_SEND_EDDP: 287 - if (card->info.type != QETH_CARD_TYPE_IQD) { 288 - card->dev->features |= NETIF_F_TSO | NETIF_F_SG | 289 - NETIF_F_HW_CSUM; 290 - } else { 291 - card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | 292 - NETIF_F_HW_CSUM); 293 - card->options.large_send = QETH_LARGE_SEND_NO; 294 - rc = -EOPNOTSUPP; 295 - } 296 - break; 297 288 case QETH_LARGE_SEND_TSO: 298 289 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) { 299 290 card->dev->features |= NETIF_F_TSO | NETIF_F_SG | ··· 943 956 dev_kfree_skb_any(skb); 944 957 skb = skb_dequeue(&buf->skb_list); 945 958 } 946 - qeth_eddp_buf_release_contexts(buf); 947 959 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { 948 960 if (buf->buffer->element[i].addr && buf->is_header[i]) 949 961 kmem_cache_free(qeth_core_header_cache, ··· 3173 3187 int qeth_do_send_packet_fast(struct qeth_card *card, 3174 3188 struct qeth_qdio_out_q *queue, struct sk_buff *skb, 3175 3189 struct qeth_hdr *hdr, int elements_needed, 3176 - struct qeth_eddp_context *ctx, int offset, int hd_len) 3190 + int offset, int hd_len) 3177 3191 { 3178 3192 struct qeth_qdio_out_buffer *buffer; 3179 - int buffers_needed = 0; 3180 - int flush_cnt = 0; 3181 3193 int index; 3182 3194 3183 3195 /* spin until we get the queue ... 
*/ ··· 3190 3206 */ 3191 3207 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 3192 3208 goto out; 3193 - if (ctx == NULL) 3194 - queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % 3209 + queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % 3195 3210 QDIO_MAX_BUFFERS_PER_Q; 3196 - else { 3197 - buffers_needed = qeth_eddp_check_buffers_for_context(queue, 3198 - ctx); 3199 - if (buffers_needed < 0) 3200 - goto out; 3201 - queue->next_buf_to_fill = 3202 - (queue->next_buf_to_fill + buffers_needed) % 3203 - QDIO_MAX_BUFFERS_PER_Q; 3204 - } 3205 3211 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 3206 - if (ctx == NULL) { 3207 - qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); 3208 - qeth_flush_buffers(queue, index, 1); 3209 - } else { 3210 - flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index); 3211 - WARN_ON(buffers_needed != flush_cnt); 3212 - qeth_flush_buffers(queue, index, flush_cnt); 3213 - } 3212 + qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); 3213 + qeth_flush_buffers(queue, index, 1); 3214 3214 return 0; 3215 3215 out: 3216 3216 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); ··· 3204 3236 3205 3237 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, 3206 3238 struct sk_buff *skb, struct qeth_hdr *hdr, 3207 - int elements_needed, struct qeth_eddp_context *ctx) 3239 + int elements_needed) 3208 3240 { 3209 3241 struct qeth_qdio_out_buffer *buffer; 3210 3242 int start_index; ··· 3230 3262 qeth_switch_to_packing_if_needed(queue); 3231 3263 if (queue->do_pack) { 3232 3264 do_pack = 1; 3233 - if (ctx == NULL) { 3234 - /* does packet fit in current buffer? */ 3235 - if ((QETH_MAX_BUFFER_ELEMENTS(card) - 3236 - buffer->next_element_to_fill) < elements_needed) { 3237 - /* ... 
no -> set state PRIMED */ 3238 - atomic_set(&buffer->state, 3239 - QETH_QDIO_BUF_PRIMED); 3240 - flush_count++; 3241 - queue->next_buf_to_fill = 3242 - (queue->next_buf_to_fill + 1) % 3243 - QDIO_MAX_BUFFERS_PER_Q; 3244 - buffer = &queue->bufs[queue->next_buf_to_fill]; 3245 - /* we did a step forward, so check buffer state 3246 - * again */ 3247 - if (atomic_read(&buffer->state) != 3248 - QETH_QDIO_BUF_EMPTY){ 3249 - qeth_flush_buffers(queue, start_index, 3265 + /* does packet fit in current buffer? */ 3266 + if ((QETH_MAX_BUFFER_ELEMENTS(card) - 3267 + buffer->next_element_to_fill) < elements_needed) { 3268 + /* ... no -> set state PRIMED */ 3269 + atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 3270 + flush_count++; 3271 + queue->next_buf_to_fill = 3272 + (queue->next_buf_to_fill + 1) % 3273 + QDIO_MAX_BUFFERS_PER_Q; 3274 + buffer = &queue->bufs[queue->next_buf_to_fill]; 3275 + /* we did a step forward, so check buffer state 3276 + * again */ 3277 + if (atomic_read(&buffer->state) != 3278 + QETH_QDIO_BUF_EMPTY) { 3279 + qeth_flush_buffers(queue, start_index, 3250 3280 flush_count); 3251 - atomic_set(&queue->state, 3281 + atomic_set(&queue->state, 3252 3282 QETH_OUT_Q_UNLOCKED); 3253 - return -EBUSY; 3254 - } 3255 - } 3256 - } else { 3257 - /* check if we have enough elements (including following 3258 - * free buffers) to handle eddp context */ 3259 - if (qeth_eddp_check_buffers_for_context(queue, ctx) 3260 - < 0) { 3261 - rc = -EBUSY; 3262 - goto out; 3283 + return -EBUSY; 3263 3284 } 3264 3285 } 3265 3286 } 3266 - if (ctx == NULL) 3267 - tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0); 3268 - else { 3269 - tmp = qeth_eddp_fill_buffer(queue, ctx, 3270 - queue->next_buf_to_fill); 3271 - if (tmp < 0) { 3272 - rc = -EBUSY; 3273 - goto out; 3274 - } 3275 - } 3287 + tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0); 3276 3288 queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) % 3277 3289 QDIO_MAX_BUFFERS_PER_Q; 3278 3290 flush_count += tmp; 3279 - 
out: 3280 3291 if (flush_count) 3281 3292 qeth_flush_buffers(queue, start_index, flush_count); 3282 3293 else if (!atomic_read(&queue->set_pci_flags_count))
-699
drivers/s390/net/qeth_core_offl.c
··· 1 - /* 2 - * drivers/s390/net/qeth_core_offl.c 3 - * 4 - * Copyright IBM Corp. 2007 5 - * Author(s): Thomas Spatzier <tspat@de.ibm.com>, 6 - * Frank Blaschka <frank.blaschka@de.ibm.com> 7 - */ 8 - 9 - #include <linux/errno.h> 10 - #include <linux/ip.h> 11 - #include <linux/inetdevice.h> 12 - #include <linux/netdevice.h> 13 - #include <linux/kernel.h> 14 - #include <linux/tcp.h> 15 - #include <net/tcp.h> 16 - #include <linux/skbuff.h> 17 - 18 - #include <net/ip.h> 19 - #include <net/ip6_checksum.h> 20 - 21 - #include "qeth_core.h" 22 - #include "qeth_core_mpc.h" 23 - #include "qeth_core_offl.h" 24 - 25 - int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue, 26 - struct qeth_eddp_context *ctx) 27 - { 28 - int index = queue->next_buf_to_fill; 29 - int elements_needed = ctx->num_elements; 30 - int elements_in_buffer; 31 - int skbs_in_buffer; 32 - int buffers_needed = 0; 33 - 34 - QETH_DBF_TEXT(TRACE, 5, "eddpcbfc"); 35 - while (elements_needed > 0) { 36 - buffers_needed++; 37 - if (atomic_read(&queue->bufs[index].state) != 38 - QETH_QDIO_BUF_EMPTY) 39 - return -EBUSY; 40 - 41 - elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) - 42 - queue->bufs[index].next_element_to_fill; 43 - skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb; 44 - elements_needed -= skbs_in_buffer * ctx->elements_per_skb; 45 - index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q; 46 - } 47 - return buffers_needed; 48 - } 49 - 50 - static void qeth_eddp_free_context(struct qeth_eddp_context *ctx) 51 - { 52 - int i; 53 - 54 - QETH_DBF_TEXT(TRACE, 5, "eddpfctx"); 55 - for (i = 0; i < ctx->num_pages; ++i) 56 - free_page((unsigned long)ctx->pages[i]); 57 - kfree(ctx->pages); 58 - kfree(ctx->elements); 59 - kfree(ctx); 60 - } 61 - 62 - 63 - static void qeth_eddp_get_context(struct qeth_eddp_context *ctx) 64 - { 65 - atomic_inc(&ctx->refcnt); 66 - } 67 - 68 - void qeth_eddp_put_context(struct qeth_eddp_context *ctx) 69 - { 70 - if (atomic_dec_return(&ctx->refcnt) == 0) 71 - 
qeth_eddp_free_context(ctx); 72 - } 73 - EXPORT_SYMBOL_GPL(qeth_eddp_put_context); 74 - 75 - void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf) 76 - { 77 - struct qeth_eddp_context_reference *ref; 78 - 79 - QETH_DBF_TEXT(TRACE, 6, "eddprctx"); 80 - while (!list_empty(&buf->ctx_list)) { 81 - ref = list_entry(buf->ctx_list.next, 82 - struct qeth_eddp_context_reference, list); 83 - qeth_eddp_put_context(ref->ctx); 84 - list_del(&ref->list); 85 - kfree(ref); 86 - } 87 - } 88 - 89 - static int qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf, 90 - struct qeth_eddp_context *ctx) 91 - { 92 - struct qeth_eddp_context_reference *ref; 93 - 94 - QETH_DBF_TEXT(TRACE, 6, "eddprfcx"); 95 - ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC); 96 - if (ref == NULL) 97 - return -ENOMEM; 98 - qeth_eddp_get_context(ctx); 99 - ref->ctx = ctx; 100 - list_add_tail(&ref->list, &buf->ctx_list); 101 - return 0; 102 - } 103 - 104 - int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue, 105 - struct qeth_eddp_context *ctx, int index) 106 - { 107 - struct qeth_qdio_out_buffer *buf = NULL; 108 - struct qdio_buffer *buffer; 109 - int elements = ctx->num_elements; 110 - int element = 0; 111 - int flush_cnt = 0; 112 - int must_refcnt = 1; 113 - int i; 114 - 115 - QETH_DBF_TEXT(TRACE, 5, "eddpfibu"); 116 - while (elements > 0) { 117 - buf = &queue->bufs[index]; 118 - if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) { 119 - /* normally this should not happen since we checked for 120 - * available elements in qeth_check_elements_for_context 121 - */ 122 - if (element == 0) 123 - return -EBUSY; 124 - else { 125 - QETH_DBF_MESSAGE(2, "could only partially fill" 126 - "eddp buffer!\n"); 127 - goto out; 128 - } 129 - } 130 - /* check if the whole next skb fits into current buffer */ 131 - if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) - 132 - buf->next_element_to_fill) 133 - < ctx->elements_per_skb){ 134 - /* no -> go to next buffer */ 135 - 
atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); 136 - index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q; 137 - flush_cnt++; 138 - /* new buffer, so we have to add ctx to buffer'ctx_list 139 - * and increment ctx's refcnt */ 140 - must_refcnt = 1; 141 - continue; 142 - } 143 - if (must_refcnt) { 144 - must_refcnt = 0; 145 - if (qeth_eddp_buf_ref_context(buf, ctx)) { 146 - goto out_check; 147 - } 148 - } 149 - buffer = buf->buffer; 150 - /* fill one skb into buffer */ 151 - for (i = 0; i < ctx->elements_per_skb; ++i) { 152 - if (ctx->elements[element].length != 0) { 153 - buffer->element[buf->next_element_to_fill]. 154 - addr = ctx->elements[element].addr; 155 - buffer->element[buf->next_element_to_fill]. 156 - length = ctx->elements[element].length; 157 - buffer->element[buf->next_element_to_fill]. 158 - flags = ctx->elements[element].flags; 159 - buf->next_element_to_fill++; 160 - } 161 - element++; 162 - elements--; 163 - } 164 - } 165 - out_check: 166 - if (!queue->do_pack) { 167 - QETH_DBF_TEXT(TRACE, 6, "fillbfnp"); 168 - /* set state to PRIMED -> will be flushed */ 169 - if (buf->next_element_to_fill > 0) { 170 - atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); 171 - flush_cnt++; 172 - } 173 - } else { 174 - if (queue->card->options.performance_stats) 175 - queue->card->perf_stats.skbs_sent_pack++; 176 - QETH_DBF_TEXT(TRACE, 6, "fillbfpa"); 177 - if (buf->next_element_to_fill >= 178 - QETH_MAX_BUFFER_ELEMENTS(queue->card)) { 179 - /* 180 - * packed buffer if full -> set state PRIMED 181 - * -> will be flushed 182 - */ 183 - atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); 184 - flush_cnt++; 185 - } 186 - } 187 - out: 188 - return flush_cnt; 189 - } 190 - 191 - static void qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, 192 - struct qeth_eddp_data *eddp, int data_len) 193 - { 194 - u8 *page; 195 - int page_remainder; 196 - int page_offset; 197 - int pkt_len; 198 - struct qeth_eddp_element *element; 199 - 200 - QETH_DBF_TEXT(TRACE, 5, "eddpcrsh"); 201 - page 
= ctx->pages[ctx->offset >> PAGE_SHIFT]; 202 - page_offset = ctx->offset % PAGE_SIZE; 203 - element = &ctx->elements[ctx->num_elements]; 204 - pkt_len = eddp->nhl + eddp->thl + data_len; 205 - /* FIXME: layer2 and VLAN !!! */ 206 - if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) 207 - pkt_len += ETH_HLEN; 208 - if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) 209 - pkt_len += VLAN_HLEN; 210 - /* does complete packet fit in current page ? */ 211 - page_remainder = PAGE_SIZE - page_offset; 212 - if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) { 213 - /* no -> go to start of next page */ 214 - ctx->offset += page_remainder; 215 - page = ctx->pages[ctx->offset >> PAGE_SHIFT]; 216 - page_offset = 0; 217 - } 218 - memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr)); 219 - element->addr = page + page_offset; 220 - element->length = sizeof(struct qeth_hdr); 221 - ctx->offset += sizeof(struct qeth_hdr); 222 - page_offset += sizeof(struct qeth_hdr); 223 - /* add mac header (?) 
*/ 224 - if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 225 - memcpy(page + page_offset, &eddp->mac, ETH_HLEN); 226 - element->length += ETH_HLEN; 227 - ctx->offset += ETH_HLEN; 228 - page_offset += ETH_HLEN; 229 - } 230 - /* add VLAN tag */ 231 - if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) { 232 - memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN); 233 - element->length += VLAN_HLEN; 234 - ctx->offset += VLAN_HLEN; 235 - page_offset += VLAN_HLEN; 236 - } 237 - /* add network header */ 238 - memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl); 239 - element->length += eddp->nhl; 240 - eddp->nh_in_ctx = page + page_offset; 241 - ctx->offset += eddp->nhl; 242 - page_offset += eddp->nhl; 243 - /* add transport header */ 244 - memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl); 245 - element->length += eddp->thl; 246 - eddp->th_in_ctx = page + page_offset; 247 - ctx->offset += eddp->thl; 248 - } 249 - 250 - static void qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, 251 - int len, __wsum *hcsum) 252 - { 253 - struct skb_frag_struct *frag; 254 - int left_in_frag; 255 - int copy_len; 256 - u8 *src; 257 - 258 - QETH_DBF_TEXT(TRACE, 5, "eddpcdtc"); 259 - if (skb_shinfo(eddp->skb)->nr_frags == 0) { 260 - skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset, 261 - dst, len); 262 - *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len, 263 - *hcsum); 264 - eddp->skb_offset += len; 265 - } else { 266 - while (len > 0) { 267 - if (eddp->frag < 0) { 268 - /* we're in skb->data */ 269 - left_in_frag = (eddp->skb->len - 270 - eddp->skb->data_len) 271 - - eddp->skb_offset; 272 - src = eddp->skb->data + eddp->skb_offset; 273 - } else { 274 - frag = &skb_shinfo(eddp->skb)->frags[ 275 - eddp->frag]; 276 - left_in_frag = frag->size - eddp->frag_offset; 277 - src = (u8 *)((page_to_pfn(frag->page) << 278 - PAGE_SHIFT) + frag->page_offset + 279 - eddp->frag_offset); 280 - } 281 - if (left_in_frag <= 0) { 282 - eddp->frag++; 283 - 
eddp->frag_offset = 0; 284 - continue; 285 - } 286 - copy_len = min(left_in_frag, len); 287 - memcpy(dst, src, copy_len); 288 - *hcsum = csum_partial(src, copy_len, *hcsum); 289 - dst += copy_len; 290 - eddp->frag_offset += copy_len; 291 - eddp->skb_offset += copy_len; 292 - len -= copy_len; 293 - } 294 - } 295 - } 296 - 297 - static void qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, 298 - struct qeth_eddp_data *eddp, int data_len, __wsum hcsum) 299 - { 300 - u8 *page; 301 - int page_remainder; 302 - int page_offset; 303 - struct qeth_eddp_element *element; 304 - int first_lap = 1; 305 - 306 - QETH_DBF_TEXT(TRACE, 5, "eddpcsdt"); 307 - page = ctx->pages[ctx->offset >> PAGE_SHIFT]; 308 - page_offset = ctx->offset % PAGE_SIZE; 309 - element = &ctx->elements[ctx->num_elements]; 310 - while (data_len) { 311 - page_remainder = PAGE_SIZE - page_offset; 312 - if (page_remainder < data_len) { 313 - qeth_eddp_copy_data_tcp(page + page_offset, eddp, 314 - page_remainder, &hcsum); 315 - element->length += page_remainder; 316 - if (first_lap) 317 - element->flags = SBAL_FLAGS_FIRST_FRAG; 318 - else 319 - element->flags = SBAL_FLAGS_MIDDLE_FRAG; 320 - ctx->num_elements++; 321 - element++; 322 - data_len -= page_remainder; 323 - ctx->offset += page_remainder; 324 - page = ctx->pages[ctx->offset >> PAGE_SHIFT]; 325 - page_offset = 0; 326 - element->addr = page + page_offset; 327 - } else { 328 - qeth_eddp_copy_data_tcp(page + page_offset, eddp, 329 - data_len, &hcsum); 330 - element->length += data_len; 331 - if (!first_lap) 332 - element->flags = SBAL_FLAGS_LAST_FRAG; 333 - ctx->num_elements++; 334 - ctx->offset += data_len; 335 - data_len = 0; 336 - } 337 - first_lap = 0; 338 - } 339 - ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum); 340 - } 341 - 342 - static __wsum qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, 343 - int data_len) 344 - { 345 - __wsum phcsum; /* pseudo header checksum */ 346 - 347 - QETH_DBF_TEXT(TRACE, 5, "eddpckt4"); 
348 - eddp->th.tcp.h.check = 0; 349 - /* compute pseudo header checksum */ 350 - phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr, 351 - eddp->thl + data_len, IPPROTO_TCP, 0); 352 - /* compute checksum of tcp header */ 353 - return csum_partial(&eddp->th, eddp->thl, phcsum); 354 - } 355 - 356 - static __wsum qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, 357 - int data_len) 358 - { 359 - __be32 proto; 360 - __wsum phcsum; /* pseudo header checksum */ 361 - 362 - QETH_DBF_TEXT(TRACE, 5, "eddpckt6"); 363 - eddp->th.tcp.h.check = 0; 364 - /* compute pseudo header checksum */ 365 - phcsum = csum_partial(&eddp->nh.ip6.h.saddr, 366 - sizeof(struct in6_addr), 0); 367 - phcsum = csum_partial(&eddp->nh.ip6.h.daddr, 368 - sizeof(struct in6_addr), phcsum); 369 - proto = htonl(IPPROTO_TCP); 370 - phcsum = csum_partial(&proto, sizeof(u32), phcsum); 371 - return phcsum; 372 - } 373 - 374 - static struct qeth_eddp_data *qeth_eddp_create_eddp_data(struct qeth_hdr *qh, 375 - u8 *nh, u8 nhl, u8 *th, u8 thl) 376 - { 377 - struct qeth_eddp_data *eddp; 378 - 379 - QETH_DBF_TEXT(TRACE, 5, "eddpcrda"); 380 - eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC); 381 - if (eddp) { 382 - eddp->nhl = nhl; 383 - eddp->thl = thl; 384 - memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr)); 385 - memcpy(&eddp->nh, nh, nhl); 386 - memcpy(&eddp->th, th, thl); 387 - eddp->frag = -1; /* initially we're in skb->data */ 388 - } 389 - return eddp; 390 - } 391 - 392 - static void __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 393 - struct qeth_eddp_data *eddp) 394 - { 395 - struct tcphdr *tcph; 396 - int data_len; 397 - __wsum hcsum; 398 - 399 - QETH_DBF_TEXT(TRACE, 5, "eddpftcp"); 400 - eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl; 401 - if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 402 - eddp->skb_offset += sizeof(struct ethhdr); 403 - if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) 404 - eddp->skb_offset += VLAN_HLEN; 405 - 
} 406 - tcph = tcp_hdr(eddp->skb); 407 - while (eddp->skb_offset < eddp->skb->len) { 408 - data_len = min((int)skb_shinfo(eddp->skb)->gso_size, 409 - (int)(eddp->skb->len - eddp->skb_offset)); 410 - /* prepare qdio hdr */ 411 - if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 412 - eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN + 413 - eddp->nhl + eddp->thl; 414 - if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) 415 - eddp->qh.hdr.l2.pkt_length += VLAN_HLEN; 416 - } else 417 - eddp->qh.hdr.l3.length = data_len + eddp->nhl + 418 - eddp->thl; 419 - /* prepare ip hdr */ 420 - if (eddp->skb->protocol == htons(ETH_P_IP)) { 421 - eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl + 422 - eddp->thl); 423 - eddp->nh.ip4.h.check = 0; 424 - eddp->nh.ip4.h.check = 425 - ip_fast_csum((u8 *)&eddp->nh.ip4.h, 426 - eddp->nh.ip4.h.ihl); 427 - } else 428 - eddp->nh.ip6.h.payload_len = htons(data_len + 429 - eddp->thl); 430 - /* prepare tcp hdr */ 431 - if (data_len == (eddp->skb->len - eddp->skb_offset)) { 432 - /* last segment -> set FIN and PSH flags */ 433 - eddp->th.tcp.h.fin = tcph->fin; 434 - eddp->th.tcp.h.psh = tcph->psh; 435 - } 436 - if (eddp->skb->protocol == htons(ETH_P_IP)) 437 - hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len); 438 - else 439 - hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len); 440 - /* fill the next segment into the context */ 441 - qeth_eddp_create_segment_hdrs(ctx, eddp, data_len); 442 - qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum); 443 - if (eddp->skb_offset >= eddp->skb->len) 444 - break; 445 - /* prepare headers for next round */ 446 - if (eddp->skb->protocol == htons(ETH_P_IP)) 447 - eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1); 448 - eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) + 449 - data_len); 450 - } 451 - } 452 - 453 - static int qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 454 - struct sk_buff *skb, struct qeth_hdr *qhdr) 455 - { 456 - struct qeth_eddp_data *eddp = NULL; 457 - 
458 - QETH_DBF_TEXT(TRACE, 5, "eddpficx"); 459 - /* create our segmentation headers and copy original headers */ 460 - if (skb->protocol == htons(ETH_P_IP)) 461 - eddp = qeth_eddp_create_eddp_data(qhdr, 462 - skb_network_header(skb), 463 - ip_hdrlen(skb), 464 - skb_transport_header(skb), 465 - tcp_hdrlen(skb)); 466 - else 467 - eddp = qeth_eddp_create_eddp_data(qhdr, 468 - skb_network_header(skb), 469 - sizeof(struct ipv6hdr), 470 - skb_transport_header(skb), 471 - tcp_hdrlen(skb)); 472 - 473 - if (eddp == NULL) { 474 - QETH_DBF_TEXT(TRACE, 2, "eddpfcnm"); 475 - return -ENOMEM; 476 - } 477 - if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 478 - skb_set_mac_header(skb, sizeof(struct qeth_hdr)); 479 - memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN); 480 - if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) { 481 - eddp->vlan[0] = skb->protocol; 482 - eddp->vlan[1] = htons(vlan_tx_tag_get(skb)); 483 - } 484 - } 485 - /* the next flags will only be set on the last segment */ 486 - eddp->th.tcp.h.fin = 0; 487 - eddp->th.tcp.h.psh = 0; 488 - eddp->skb = skb; 489 - /* begin segmentation and fill context */ 490 - __qeth_eddp_fill_context_tcp(ctx, eddp); 491 - kfree(eddp); 492 - return 0; 493 - } 494 - 495 - static void qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, 496 - struct sk_buff *skb, int hdr_len) 497 - { 498 - int skbs_per_page; 499 - 500 - QETH_DBF_TEXT(TRACE, 5, "eddpcanp"); 501 - /* can we put multiple skbs in one page? */ 502 - skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len); 503 - if (skbs_per_page > 1) { 504 - ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) / 505 - skbs_per_page + 1; 506 - ctx->elements_per_skb = 1; 507 - } else { 508 - /* no -> how many elements per skb? 
*/ 509 - ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len + 510 - PAGE_SIZE) >> PAGE_SHIFT; 511 - ctx->num_pages = ctx->elements_per_skb * 512 - (skb_shinfo(skb)->gso_segs + 1); 513 - } 514 - ctx->num_elements = ctx->elements_per_skb * 515 - (skb_shinfo(skb)->gso_segs + 1); 516 - } 517 - 518 - static struct qeth_eddp_context *qeth_eddp_create_context_generic( 519 - struct qeth_card *card, struct sk_buff *skb, int hdr_len) 520 - { 521 - struct qeth_eddp_context *ctx = NULL; 522 - u8 *addr; 523 - int i; 524 - 525 - QETH_DBF_TEXT(TRACE, 5, "creddpcg"); 526 - /* create the context and allocate pages */ 527 - ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC); 528 - if (ctx == NULL) { 529 - QETH_DBF_TEXT(TRACE, 2, "ceddpcn1"); 530 - return NULL; 531 - } 532 - ctx->type = QETH_LARGE_SEND_EDDP; 533 - qeth_eddp_calc_num_pages(ctx, skb, hdr_len); 534 - if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) { 535 - QETH_DBF_TEXT(TRACE, 2, "ceddpcis"); 536 - kfree(ctx); 537 - return NULL; 538 - } 539 - ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC); 540 - if (ctx->pages == NULL) { 541 - QETH_DBF_TEXT(TRACE, 2, "ceddpcn2"); 542 - kfree(ctx); 543 - return NULL; 544 - } 545 - for (i = 0; i < ctx->num_pages; ++i) { 546 - addr = (u8 *)get_zeroed_page(GFP_ATOMIC); 547 - if (addr == NULL) { 548 - QETH_DBF_TEXT(TRACE, 2, "ceddpcn3"); 549 - ctx->num_pages = i; 550 - qeth_eddp_free_context(ctx); 551 - return NULL; 552 - } 553 - ctx->pages[i] = addr; 554 - } 555 - ctx->elements = kcalloc(ctx->num_elements, 556 - sizeof(struct qeth_eddp_element), GFP_ATOMIC); 557 - if (ctx->elements == NULL) { 558 - QETH_DBF_TEXT(TRACE, 2, "ceddpcn4"); 559 - qeth_eddp_free_context(ctx); 560 - return NULL; 561 - } 562 - /* reset num_elements; will be incremented again in fill_buffer to 563 - * reflect number of actually used elements */ 564 - ctx->num_elements = 0; 565 - return ctx; 566 - } 567 - 568 - static struct qeth_eddp_context *qeth_eddp_create_context_tcp( 
569 - struct qeth_card *card, struct sk_buff *skb, 570 - struct qeth_hdr *qhdr) 571 - { 572 - struct qeth_eddp_context *ctx = NULL; 573 - 574 - QETH_DBF_TEXT(TRACE, 5, "creddpct"); 575 - if (skb->protocol == htons(ETH_P_IP)) 576 - ctx = qeth_eddp_create_context_generic(card, skb, 577 - (sizeof(struct qeth_hdr) + 578 - ip_hdrlen(skb) + 579 - tcp_hdrlen(skb))); 580 - else if (skb->protocol == htons(ETH_P_IPV6)) 581 - ctx = qeth_eddp_create_context_generic(card, skb, 582 - sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) + 583 - tcp_hdrlen(skb)); 584 - else 585 - QETH_DBF_TEXT(TRACE, 2, "cetcpinv"); 586 - 587 - if (ctx == NULL) { 588 - QETH_DBF_TEXT(TRACE, 2, "creddpnl"); 589 - return NULL; 590 - } 591 - if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) { 592 - QETH_DBF_TEXT(TRACE, 2, "ceddptfe"); 593 - qeth_eddp_free_context(ctx); 594 - return NULL; 595 - } 596 - atomic_set(&ctx->refcnt, 1); 597 - return ctx; 598 - } 599 - 600 - struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *card, 601 - struct sk_buff *skb, struct qeth_hdr *qhdr, 602 - unsigned char sk_protocol) 603 - { 604 - QETH_DBF_TEXT(TRACE, 5, "creddpc"); 605 - switch (sk_protocol) { 606 - case IPPROTO_TCP: 607 - return qeth_eddp_create_context_tcp(card, skb, qhdr); 608 - default: 609 - QETH_DBF_TEXT(TRACE, 2, "eddpinvp"); 610 - } 611 - return NULL; 612 - } 613 - EXPORT_SYMBOL_GPL(qeth_eddp_create_context); 614 - 615 - void qeth_tso_fill_header(struct qeth_card *card, struct qeth_hdr *qhdr, 616 - struct sk_buff *skb) 617 - { 618 - struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr; 619 - struct tcphdr *tcph = tcp_hdr(skb); 620 - struct iphdr *iph = ip_hdr(skb); 621 - struct ipv6hdr *ip6h = ipv6_hdr(skb); 622 - 623 - QETH_DBF_TEXT(TRACE, 5, "tsofhdr"); 624 - 625 - /*fix header to TSO values ...*/ 626 - hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; 627 - /*set values which are fix for the first approach ...*/ 628 - hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso); 629 - 
hdr->ext.imb_hdr_no = 1; 630 - hdr->ext.hdr_type = 1; 631 - hdr->ext.hdr_version = 1; 632 - hdr->ext.hdr_len = 28; 633 - /*insert non-fix values */ 634 - hdr->ext.mss = skb_shinfo(skb)->gso_size; 635 - hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4); 636 - hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len - 637 - sizeof(struct qeth_hdr_tso)); 638 - tcph->check = 0; 639 - if (skb->protocol == ETH_P_IPV6) { 640 - ip6h->payload_len = 0; 641 - tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 642 - 0, IPPROTO_TCP, 0); 643 - } else { 644 - /*OSA want us to set these values ...*/ 645 - tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 646 - 0, IPPROTO_TCP, 0); 647 - iph->tot_len = 0; 648 - iph->check = 0; 649 - } 650 - } 651 - EXPORT_SYMBOL_GPL(qeth_tso_fill_header); 652 - 653 - void qeth_tx_csum(struct sk_buff *skb) 654 - { 655 - int tlen; 656 - if (skb->protocol == htons(ETH_P_IP)) { 657 - tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2); 658 - switch (ip_hdr(skb)->protocol) { 659 - case IPPROTO_TCP: 660 - tcp_hdr(skb)->check = 0; 661 - tcp_hdr(skb)->check = csum_tcpudp_magic( 662 - ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 663 - tlen, ip_hdr(skb)->protocol, 664 - skb_checksum(skb, skb_transport_offset(skb), 665 - tlen, 0)); 666 - break; 667 - case IPPROTO_UDP: 668 - udp_hdr(skb)->check = 0; 669 - udp_hdr(skb)->check = csum_tcpudp_magic( 670 - ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 671 - tlen, ip_hdr(skb)->protocol, 672 - skb_checksum(skb, skb_transport_offset(skb), 673 - tlen, 0)); 674 - break; 675 - } 676 - } else if (skb->protocol == htons(ETH_P_IPV6)) { 677 - switch (ipv6_hdr(skb)->nexthdr) { 678 - case IPPROTO_TCP: 679 - tcp_hdr(skb)->check = 0; 680 - tcp_hdr(skb)->check = csum_ipv6_magic( 681 - &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 682 - ipv6_hdr(skb)->payload_len, 683 - ipv6_hdr(skb)->nexthdr, 684 - skb_checksum(skb, skb_transport_offset(skb), 685 - ipv6_hdr(skb)->payload_len, 0)); 686 - break; 687 - case 
IPPROTO_UDP: 688 - udp_hdr(skb)->check = 0; 689 - udp_hdr(skb)->check = csum_ipv6_magic( 690 - &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 691 - ipv6_hdr(skb)->payload_len, 692 - ipv6_hdr(skb)->nexthdr, 693 - skb_checksum(skb, skb_transport_offset(skb), 694 - ipv6_hdr(skb)->payload_len, 0)); 695 - break; 696 - } 697 - } 698 - } 699 - EXPORT_SYMBOL_GPL(qeth_tx_csum);
-76
drivers/s390/net/qeth_core_offl.h
··· 1 - /* 2 - * drivers/s390/net/qeth_core_offl.h 3 - * 4 - * Copyright IBM Corp. 2007 5 - * Author(s): Thomas Spatzier <tspat@de.ibm.com>, 6 - * Frank Blaschka <frank.blaschka@de.ibm.com> 7 - */ 8 - 9 - #ifndef __QETH_CORE_OFFL_H__ 10 - #define __QETH_CORE_OFFL_H__ 11 - 12 - struct qeth_eddp_element { 13 - u32 flags; 14 - u32 length; 15 - void *addr; 16 - }; 17 - 18 - struct qeth_eddp_context { 19 - atomic_t refcnt; 20 - enum qeth_large_send_types type; 21 - int num_pages; /* # of allocated pages */ 22 - u8 **pages; /* pointers to pages */ 23 - int offset; /* offset in ctx during creation */ 24 - int num_elements; /* # of required 'SBALEs' */ 25 - struct qeth_eddp_element *elements; /* array of 'SBALEs' */ 26 - int elements_per_skb; /* # of 'SBALEs' per skb **/ 27 - }; 28 - 29 - struct qeth_eddp_context_reference { 30 - struct list_head list; 31 - struct qeth_eddp_context *ctx; 32 - }; 33 - 34 - struct qeth_eddp_data { 35 - struct qeth_hdr qh; 36 - struct ethhdr mac; 37 - __be16 vlan[2]; 38 - union { 39 - struct { 40 - struct iphdr h; 41 - u8 options[40]; 42 - } ip4; 43 - struct { 44 - struct ipv6hdr h; 45 - } ip6; 46 - } nh; 47 - u8 nhl; 48 - void *nh_in_ctx; /* address of nh within the ctx */ 49 - union { 50 - struct { 51 - struct tcphdr h; 52 - u8 options[40]; 53 - } tcp; 54 - } th; 55 - u8 thl; 56 - void *th_in_ctx; /* address of th within the ctx */ 57 - struct sk_buff *skb; 58 - int skb_offset; 59 - int frag; 60 - int frag_offset; 61 - } __attribute__ ((packed)); 62 - 63 - extern struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *, 64 - struct sk_buff *, struct qeth_hdr *, unsigned char); 65 - extern void qeth_eddp_put_context(struct qeth_eddp_context *); 66 - extern int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *, 67 - struct qeth_eddp_context *, int); 68 - extern void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *); 69 - extern int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *, 70 - struct 
qeth_eddp_context *); 71 - 72 - void qeth_tso_fill_header(struct qeth_card *, struct qeth_hdr *, 73 - struct sk_buff *); 74 - void qeth_tx_csum(struct sk_buff *skb); 75 - 76 - #endif /* __QETH_CORE_EDDP_H__ */
-4
drivers/s390/net/qeth_core_sys.c
··· 427 427 switch (card->options.large_send) { 428 428 case QETH_LARGE_SEND_NO: 429 429 return sprintf(buf, "%s\n", "no"); 430 - case QETH_LARGE_SEND_EDDP: 431 - return sprintf(buf, "%s\n", "EDDP"); 432 430 case QETH_LARGE_SEND_TSO: 433 431 return sprintf(buf, "%s\n", "TSO"); 434 432 default: ··· 447 449 tmp = strsep((char **) &buf, "\n"); 448 450 if (!strcmp(tmp, "no")) { 449 451 type = QETH_LARGE_SEND_NO; 450 - } else if (!strcmp(tmp, "EDDP")) { 451 - type = QETH_LARGE_SEND_EDDP; 452 452 } else if (!strcmp(tmp, "TSO")) { 453 453 type = QETH_LARGE_SEND_TSO; 454 454 } else {
+8 -73
drivers/s390/net/qeth_l2_main.c
··· 21 21 #include <linux/ip.h> 22 22 23 23 #include "qeth_core.h" 24 - #include "qeth_core_offl.h" 25 24 26 25 static int qeth_l2_set_offline(struct ccwgroup_device *); 27 26 static int qeth_l2_stop(struct net_device *); ··· 633 634 struct qeth_qdio_out_q *queue = card->qdio.out_qs 634 635 [qeth_get_priority_queue(card, skb, ipv, cast_type)]; 635 636 int tx_bytes = skb->len; 636 - enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; 637 - struct qeth_eddp_context *ctx = NULL; 638 637 int data_offset = -1; 639 638 int elements_needed = 0; 640 639 int hd_len = 0; ··· 652 655 } 653 656 netif_stop_queue(dev); 654 657 655 - if (skb_is_gso(skb)) 656 - large_send = QETH_LARGE_SEND_EDDP; 657 - 658 658 if (card->info.type == QETH_CARD_TYPE_OSN) 659 659 hdr = (struct qeth_hdr *)skb->data; 660 660 else { 661 - if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) && 662 - (skb_shinfo(skb)->nr_frags == 0)) { 661 + if (card->info.type == QETH_CARD_TYPE_IQD) { 663 662 new_skb = skb; 664 663 data_offset = ETH_HLEN; 665 664 hd_len = ETH_HLEN; ··· 682 689 } 683 690 } 684 691 685 - if (large_send == QETH_LARGE_SEND_EDDP) { 686 - ctx = qeth_eddp_create_context(card, new_skb, hdr, 687 - skb->sk->sk_protocol); 688 - if (ctx == NULL) { 689 - QETH_DBF_MESSAGE(2, "could not create eddp context\n"); 690 - goto tx_drop; 691 - } 692 - } else { 693 - elements = qeth_get_elements_no(card, (void *)hdr, new_skb, 692 + elements = qeth_get_elements_no(card, (void *)hdr, new_skb, 694 693 elements_needed); 695 - if (!elements) { 696 - if (data_offset >= 0) 697 - kmem_cache_free(qeth_core_header_cache, hdr); 698 - goto tx_drop; 699 - } 700 - } 701 - 702 - if ((large_send == QETH_LARGE_SEND_NO) && 703 - (skb->ip_summed == CHECKSUM_PARTIAL)) { 704 - qeth_tx_csum(new_skb); 705 - if (card->options.performance_stats) 706 - card->perf_stats.tx_csum++; 694 + if (!elements) { 695 + if (data_offset >= 0) 696 + kmem_cache_free(qeth_core_header_cache, hdr); 697 + goto tx_drop; 707 698 } 708 699 
709 700 if (card->info.type != QETH_CARD_TYPE_IQD) 710 701 rc = qeth_do_send_packet(card, queue, new_skb, hdr, 711 - elements, ctx); 702 + elements); 712 703 else 713 704 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, 714 - elements, ctx, data_offset, hd_len); 705 + elements, data_offset, hd_len); 715 706 if (!rc) { 716 707 card->stats.tx_packets++; 717 708 card->stats.tx_bytes += tx_bytes; 718 709 if (new_skb != skb) 719 710 dev_kfree_skb_any(skb); 720 - if (card->options.performance_stats) { 721 - if (large_send != QETH_LARGE_SEND_NO) { 722 - card->perf_stats.large_send_bytes += tx_bytes; 723 - card->perf_stats.large_send_cnt++; 724 - } 725 - if (skb_shinfo(new_skb)->nr_frags > 0) { 726 - card->perf_stats.sg_skbs_sent++; 727 - /* nr_frags + skb->data */ 728 - card->perf_stats.sg_frags_sent += 729 - skb_shinfo(new_skb)->nr_frags + 1; 730 - } 731 - } 732 - 733 - if (ctx != NULL) { 734 - qeth_eddp_put_context(ctx); 735 - dev_kfree_skb_any(new_skb); 736 - } 737 711 } else { 738 - if (ctx != NULL) 739 - qeth_eddp_put_context(ctx); 740 - 741 712 if (data_offset >= 0) 742 713 kmem_cache_free(qeth_core_header_cache, hdr); 743 714 ··· 838 881 return; 839 882 } 840 883 841 - static int qeth_l2_ethtool_set_tso(struct net_device *dev, u32 data) 842 - { 843 - struct qeth_card *card = dev->ml_priv; 844 - 845 - if (data) { 846 - if (card->options.large_send == QETH_LARGE_SEND_NO) { 847 - card->options.large_send = QETH_LARGE_SEND_EDDP; 848 - dev->features |= NETIF_F_TSO; 849 - } 850 - } else { 851 - dev->features &= ~NETIF_F_TSO; 852 - card->options.large_send = QETH_LARGE_SEND_NO; 853 - } 854 - return 0; 855 - } 856 - 857 884 static struct ethtool_ops qeth_l2_ethtool_ops = { 858 885 .get_link = ethtool_op_get_link, 859 - .get_tx_csum = ethtool_op_get_tx_csum, 860 - .set_tx_csum = ethtool_op_set_tx_hw_csum, 861 - .get_sg = ethtool_op_get_sg, 862 - .set_sg = ethtool_op_set_sg, 863 - .get_tso = ethtool_op_get_tso, 864 - .set_tso = qeth_l2_ethtool_set_tso, 865 886 
.get_strings = qeth_core_get_strings, 866 887 .get_ethtool_stats = qeth_core_get_ethtool_stats, 867 888 .get_stats_count = qeth_core_get_stats_count,
+77 -46
drivers/s390/net/qeth_l3_main.c
··· 19 19 #include <linux/etherdevice.h> 20 20 #include <linux/mii.h> 21 21 #include <linux/ip.h> 22 - #include <linux/reboot.h> 22 + #include <linux/ipv6.h> 23 23 #include <linux/inetdevice.h> 24 24 #include <linux/igmp.h> 25 25 26 26 #include <net/ip.h> 27 27 #include <net/arp.h> 28 + #include <net/ip6_checksum.h> 28 29 29 30 #include "qeth_l3.h" 30 - #include "qeth_core_offl.h" 31 31 32 32 static int qeth_l3_set_offline(struct ccwgroup_device *); 33 33 static int qeth_l3_recover(void *); ··· 2577 2577 } 2578 2578 } 2579 2579 2580 + static void qeth_tso_fill_header(struct qeth_card *card, 2581 + struct qeth_hdr *qhdr, struct sk_buff *skb) 2582 + { 2583 + struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr; 2584 + struct tcphdr *tcph = tcp_hdr(skb); 2585 + struct iphdr *iph = ip_hdr(skb); 2586 + struct ipv6hdr *ip6h = ipv6_hdr(skb); 2587 + 2588 + /*fix header to TSO values ...*/ 2589 + hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; 2590 + /*set values which are fix for the first approach ...*/ 2591 + hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso); 2592 + hdr->ext.imb_hdr_no = 1; 2593 + hdr->ext.hdr_type = 1; 2594 + hdr->ext.hdr_version = 1; 2595 + hdr->ext.hdr_len = 28; 2596 + /*insert non-fix values */ 2597 + hdr->ext.mss = skb_shinfo(skb)->gso_size; 2598 + hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4); 2599 + hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len - 2600 + sizeof(struct qeth_hdr_tso)); 2601 + tcph->check = 0; 2602 + if (skb->protocol == ETH_P_IPV6) { 2603 + ip6h->payload_len = 0; 2604 + tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 2605 + 0, IPPROTO_TCP, 0); 2606 + } else { 2607 + /*OSA want us to set these values ...*/ 2608 + tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 2609 + 0, IPPROTO_TCP, 0); 2610 + iph->tot_len = 0; 2611 + iph->check = 0; 2612 + } 2613 + } 2614 + 2615 + static void qeth_tx_csum(struct sk_buff *skb) 2616 + { 2617 + __wsum csum; 2618 + int offset; 2619 + 2620 + 
skb_set_transport_header(skb, skb->csum_start - skb_headroom(skb)); 2621 + offset = skb->csum_start - skb_headroom(skb); 2622 + BUG_ON(offset >= skb_headlen(skb)); 2623 + csum = skb_checksum(skb, offset, skb->len - offset, 0); 2624 + 2625 + offset += skb->csum_offset; 2626 + BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); 2627 + *(__sum16 *)(skb->data + offset) = csum_fold(csum); 2628 + } 2629 + 2580 2630 static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 2581 2631 { 2582 2632 int rc; 2583 2633 u16 *tag; 2584 2634 struct qeth_hdr *hdr = NULL; 2585 2635 int elements_needed = 0; 2636 + int elems; 2586 2637 struct qeth_card *card = dev->ml_priv; 2587 2638 struct sk_buff *new_skb = NULL; 2588 2639 int ipv = qeth_get_ip_version(skb); ··· 2642 2591 [qeth_get_priority_queue(card, skb, ipv, cast_type)]; 2643 2592 int tx_bytes = skb->len; 2644 2593 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; 2645 - struct qeth_eddp_context *ctx = NULL; 2646 2594 int data_offset = -1; 2595 + int nr_frags; 2647 2596 2648 2597 if ((card->info.type == QETH_CARD_TYPE_IQD) && 2649 2598 (skb->protocol != htons(ETH_P_IPV6)) && ··· 2666 2615 2667 2616 if (skb_is_gso(skb)) 2668 2617 large_send = card->options.large_send; 2618 + else 2619 + if (skb->ip_summed == CHECKSUM_PARTIAL) { 2620 + qeth_tx_csum(skb); 2621 + if (card->options.performance_stats) 2622 + card->perf_stats.tx_csum++; 2623 + } 2669 2624 2670 2625 if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) && 2671 2626 (skb_shinfo(skb)->nr_frags == 0)) { ··· 2718 2661 netif_stop_queue(dev); 2719 2662 2720 2663 /* fix hardware limitation: as long as we do not have sbal 2721 - * chaining we can not send long frag lists so we temporary 2722 - * switch to EDDP 2664 + * chaining we can not send long frag lists 2723 2665 */ 2724 2666 if ((large_send == QETH_LARGE_SEND_TSO) && 2725 - ((skb_shinfo(new_skb)->nr_frags + 2) > 16)) 2726 - large_send = QETH_LARGE_SEND_EDDP; 2667 + 
((skb_shinfo(new_skb)->nr_frags + 2) > 16)) { 2668 + if (skb_linearize(new_skb)) 2669 + goto tx_drop; 2670 + } 2727 2671 2728 2672 if ((large_send == QETH_LARGE_SEND_TSO) && 2729 2673 (cast_type == RTN_UNSPEC)) { ··· 2747 2689 } 2748 2690 } 2749 2691 2750 - if (large_send == QETH_LARGE_SEND_EDDP) { 2751 - /* new_skb is not owned by a socket so we use skb to get 2752 - * the protocol 2753 - */ 2754 - ctx = qeth_eddp_create_context(card, new_skb, hdr, 2755 - skb->sk->sk_protocol); 2756 - if (ctx == NULL) { 2757 - QETH_DBF_MESSAGE(2, "could not create eddp context\n"); 2758 - goto tx_drop; 2759 - } 2760 - } else { 2761 - int elems = qeth_get_elements_no(card, (void *)hdr, new_skb, 2692 + elems = qeth_get_elements_no(card, (void *)hdr, new_skb, 2762 2693 elements_needed); 2763 - if (!elems) { 2764 - if (data_offset >= 0) 2765 - kmem_cache_free(qeth_core_header_cache, hdr); 2766 - goto tx_drop; 2767 - } 2768 - elements_needed += elems; 2694 + if (!elems) { 2695 + if (data_offset >= 0) 2696 + kmem_cache_free(qeth_core_header_cache, hdr); 2697 + goto tx_drop; 2769 2698 } 2770 - 2771 - if ((large_send == QETH_LARGE_SEND_NO) && 2772 - (new_skb->ip_summed == CHECKSUM_PARTIAL)) { 2773 - qeth_tx_csum(new_skb); 2774 - if (card->options.performance_stats) 2775 - card->perf_stats.tx_csum++; 2776 - } 2699 + elements_needed += elems; 2700 + nr_frags = skb_shinfo(new_skb)->nr_frags; 2777 2701 2778 2702 if (card->info.type != QETH_CARD_TYPE_IQD) 2779 2703 rc = qeth_do_send_packet(card, queue, new_skb, hdr, 2780 - elements_needed, ctx); 2704 + elements_needed); 2781 2705 else 2782 2706 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, 2783 - elements_needed, ctx, data_offset, 0); 2707 + elements_needed, data_offset, 0); 2784 2708 2785 2709 if (!rc) { 2786 2710 card->stats.tx_packets++; ··· 2774 2734 card->perf_stats.large_send_bytes += tx_bytes; 2775 2735 card->perf_stats.large_send_cnt++; 2776 2736 } 2777 - if (skb_shinfo(new_skb)->nr_frags > 0) { 2737 + if (nr_frags) { 2778 
2738 card->perf_stats.sg_skbs_sent++; 2779 2739 /* nr_frags + skb->data */ 2780 - card->perf_stats.sg_frags_sent += 2781 - skb_shinfo(new_skb)->nr_frags + 1; 2740 + card->perf_stats.sg_frags_sent += nr_frags + 1; 2782 2741 } 2783 2742 } 2784 - 2785 - if (ctx != NULL) { 2786 - qeth_eddp_put_context(ctx); 2787 - dev_kfree_skb_any(new_skb); 2788 - } 2789 2743 } else { 2790 - if (ctx != NULL) 2791 - qeth_eddp_put_context(ctx); 2792 - 2793 2744 if (data_offset >= 0) 2794 2745 kmem_cache_free(qeth_core_header_cache, hdr); 2795 2746 ··· 2875 2844 if (data) { 2876 2845 if (card->options.large_send == QETH_LARGE_SEND_NO) { 2877 2846 if (card->info.type == QETH_CARD_TYPE_IQD) 2878 - card->options.large_send = QETH_LARGE_SEND_EDDP; 2847 + return -EPERM; 2879 2848 else 2880 2849 card->options.large_send = QETH_LARGE_SEND_TSO; 2881 2850 dev->features |= NETIF_F_TSO;