// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/io.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]	 = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification);
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);

static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return " OSN QDIO";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return "OSN";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list) {
		list_del(&pool_entry->list);
	}
}
EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}
	return 0;
}

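/* Grow or shrink the pool of pre-allocated RX buffer pages. Shrinking
 * frees entries one at a time; growing collects the new entries on a
 * temporary list first, so that an allocation failure can be unwound
 * without disturbing the live pool.
 */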
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until queue is allocated: */
	if (!card->qdio.in_q)
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0, 127);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		int i;
		struct qdio_outbuf_state *outbuf_states;

		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			rc = -1;
			goto kmsg_out;
		}
		card->qdio.no_in_queues = 2;
		card->qdio.out_bufstates =
			kcalloc(card->qdio.no_out_queues *
					QDIO_MAX_BUFFERS_PER_Q,
				sizeof(struct qdio_outbuf_state),
				GFP_KERNEL);
		outbuf_states = card->qdio.out_bufstates;
		if (outbuf_states == NULL) {
			rc = -1;
			goto free_cq_out;
		}
		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			card->qdio.out_qs[i]->bufstates = outbuf_states;
			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
	rc = 0;
out:
	return rc;
free_cq_out:
	qeth_free_qdio_queue(card->qdio.c_q);
	card->qdio.c_q = NULL;
kmsg_out:
	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
	goto out;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
	kfree(card->qdio.out_bufstates);
	card->qdio.out_bufstates = NULL;
}

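/* Map the completion code of an asynchronously completed TX buffer to
 * an AF_IUCV TX notification: 0 is success, 4 and 16-18 mean the peer
 * is unreachable, anything else is reported as a general error.
 */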
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			      TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			      TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

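/* Walk the chain of pending TX buffers attached to q->bufs[bidx], and
 * free each buffer whose asynchronous completion has already been
 * handled. With forced_cleanup (queue teardown or recovery), every
 * chained buffer is released regardless of its state.
 */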
static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
					 int forced_cleanup)
{
	if (q->card->options.cq != QETH_CQ_ENABLED)
		return;

	if (q->bufs[bidx]->next_pending != NULL) {
		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;

		while (c) {
			if (forced_cleanup ||
			    atomic_read(&c->state) ==
			    QETH_QDIO_BUF_HANDLED_DELAYED) {
				struct qeth_qdio_out_buffer *f = c;

				QETH_CARD_TEXT(f->q->card, 5, "fp");
				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
				/* release here to avoid interleaving between
				 * outbound tasklet and inbound tasklet
				 * regarding notifications and lifecycle
				 */
				qeth_tx_complete_buf(c, forced_cleanup, 0);

				c = f->next_pending;
				WARN_ON_ONCE(head->next_pending != f);
				head->next_pending = c;
				kmem_cache_free(qeth_qdio_outbuf_cache, f);
			} else {
				head = c;
				c = c->next_pending;
			}
		}
	}
	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
			       QETH_QDIO_BUF_HANDLED_DELAYED)) {
		/* for recovery situations */
		qeth_init_qdio_out_buf(q, bidx);
		QETH_CARD_TEXT(q->card, 2, "clprecov");
	}
}

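/* Process the async completion block (QAOB) for a TX buffer that the
 * device completed only after signalling PENDING. aob->user1 holds the
 * driver's buffer pointer, and aob->aorc the completion code that gets
 * translated into the AF_IUCV notification.
 */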
static void qeth_qdio_handle_aob(struct qeth_card *card,
				 unsigned long phys_aob_addr)
{
	struct qaob *aob;
	struct qeth_qdio_out_buffer *buffer;
	enum iucv_tx_notify notification;
	unsigned int i;

	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
	QETH_CARD_TEXT(card, 5, "haob");
	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);

	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
		notification = TX_NOTIFY_OK;
	} else {
		WARN_ON_ONCE(atomic_read(&buffer->state) !=
			     QETH_QDIO_BUF_PENDING);
		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
		notification = TX_NOTIFY_DELAYED_OK;
	}

	if (aob->aorc != 0) {
		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
		notification = qeth_compute_cq_notification(aob->aorc, 1);
	}
	qeth_notify_skbs(buffer->q, buffer, notification);

	/* Free dangling allocations. The attached skbs are handled by
	 * qeth_cleanup_handled_pending().
	 */
	for (i = 0;
	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
	     i++) {
		void *data = phys_to_virt(aob->sba[i]);

		if (data && buffer->is_header[i])
			kmem_cache_free(qeth_core_header_cache, data);
	}
	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);

	qdio_release_aob(aob);
}

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list);
	spin_unlock_irq(&card->lock);
}

void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}
EXPORT_SYMBOL_GPL(qeth_notify_cmd);

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}
EXPORT_SYMBOL_GPL(qeth_flush_local_addrs);

static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

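/* Test whether the skb's next hop is one of the addresses that the
 * device announced as local (see qeth_add_local_addrs4() above), using
 * an RCU-protected hash lookup. Callers can use this e.g. to
 * special-case traffic that never leaves the adapter.
 */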
static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
			       struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SETCCID &&
		    cmd->hdr.command != IPA_CMD_DELCCID &&
		    cmd->hdr.command != IPA_CMD_MODCCID &&
		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				QETH_CARD_IFNAME(card));
			schedule_work(&card->close_dev_work);
		} else {
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 QETH_CARD_IFNAME(card), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 QETH_CARD_IFNAME(card), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_MODCCID:
		return cmd;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list)
		qeth_notify_cmd(iob, -EIO);
	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);

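/* Check a control-channel buffer for an IDX TERMINATE indication. A
 * bad-transport cause code means that the device does not support the
 * configured layer2/layer3 transport mode.
 */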
static int qeth_check_idx_response(struct qeth_card *card,
				   unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
EXPORT_SYMBOL_GPL(qeth_put_cmd);

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
				       unsigned int length, unsigned int ccws,
				       long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	INIT_LIST_HEAD(&iob->list);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);

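/* Completion callback for the long-running READ on the control channel:
 * validate the IDX header, let qeth_check_ipa_data() filter out
 * unsolicited IPA events, match the buffer against the waiting cmd
 * requests and run the matching request's reply callback. Finally
 * re-arm the next READ.
 */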
static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		/* fall through */
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
		if (IS_OSN(card) && card->osn_info.assist_cb &&
		    cmd->hdr.command != IPA_CMD_STARTLAN) {
			card->osn_info.assist_cb(card->dev, cmd);
			goto out;
		}
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
				     unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else {
			rc = -EPERM;
		}
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
			       16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, " rc???");
		return PTR_ERR(irb);
	}
}

static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		     struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	channel->active_cmd = NULL;
	qeth_unlock_channel(card, channel);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				 "The qeth device driver failed to recover an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
	}
}

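/* Account a completed TX buffer and release its skbs. A buffer that is
 * still in PENDING state gets its AF_IUCV skbs notified with
 * TX_NOTIFY_GENERALERROR, since it is being torn down before its async
 * completion was handled.
 */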
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct qeth_qdio_out_q *queue = buf->q;
	struct sk_buff *skb;

	/* release may never happen from within CQ tasklet scope */
	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);

	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
		atomic_dec(&queue->set_pci_flags_count);

	qeth_tx_complete_buf(buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (data && buf->is_header[i])
			kmem_cache_free(qeth_core_header_cache, data);
		buf->is_header[i] = 0;
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;
		qeth_cleanup_handled_pending(q, j, 1);
		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}
EXPORT_SYMBOL_GPL(qeth_drain_output_queues);

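/* Switch an OSA device between single-queue and multi-queue TX mode.
 * If the queue count changes, any previously established QDIO queues
 * are freed so that the next (re-)init allocates them with the new
 * count.
 */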
static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;
	unsigned int count;
	int rc;

	count = IS_VM_NIC(card) ? min(max, card->dev->real_num_tx_queues) : max;

	rtnl_lock();
	rc = netif_set_real_num_tx_queues(card->dev, count);
	rtnl_unlock();

	if (rc)
		return rc;

	if (card->qdio.no_out_queues == max)
		return 0;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
	return 0;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return rc;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					      kernel_thread_starter);

	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
						      QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					      qeth_service_level);

	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			   CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
					      channel->state == CH_STATE_STOPPED,
					      QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
					      channel->state == CH_STATE_HALTED,
					      QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd) {
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);
		channel->active_cmd = NULL;
	}
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_stop_channel);

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	atomic_set(&channel->irq_pending, 0);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
			       QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
					   QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
					   QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);

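/* On z/VM, query CP via DIAG 0x26c for the VNIC's transport protocol,
 * so that the layer2 vs. layer3 discipline can be selected without
 * user configuration. Request and response buffers are GFP_DMA, and
 * the guest's user ID is converted to EBCDIC for the request.
 */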
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card) || IS_OSN(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}

static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_idx_init(struct qeth_card *card)
{
	memset(&card->seqno, 0, sizeof(card->seqno));

	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSN:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

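/* Normalize the function level reported by the peer into the same form
 * as the local IDX function level (see qeth_idx_init()), so that the
 * two can be compared during IDX activation.
 */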
static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	iob->match = qeth_mpc_match_reply;
	return iob;
}

/**
 * qeth_send_control_data() - send control command to the card
 * @card:	qeth_card structure pointer
 * @iob:	qeth_cmd_buffer pointer
 * @reply_cb:	callback function pointer
 *  @cb_card:	pointer to the qeth_card structure
 *  @cb_reply:	pointer to the qeth_reply structure
 *  @cb_cmd:	pointer to the original iob for non-IPA
 *		commands, or to the qeth_ipa_cmd structure
 *		for the IPA commands.
 * @reply_param: private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */
static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	if (!rc)
		channel->active_cmd = iob;
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		qeth_unlock_channel(card, channel);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
2119 timeout);
2120 if (timeout <= 0)
2121 rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
2122
2123 qeth_dequeue_cmd(card, iob);
2124
2125 if (reply_cb) {
2126 /* Wait until the callback for a late reply has completed: */
2127 spin_lock_irq(&iob->lock);
2128 if (rc)
2129 /* Zap any callback that's still pending: */
2130 iob->rc = rc;
2131 spin_unlock_irq(&iob->lock);
2132 }
2133
2134 if (!rc)
2135 rc = iob->rc;
2136
2137out:
2138 qeth_put_cmd(iob);
2139 return rc;
2140}
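/*
 * A minimal usage sketch for the callback contract documented above.
 * "my_cb" and "my_result" are hypothetical names; for a non-IPA command
 * cb_cmd points at the reply iob, and returning 0 signals that no
 * further reply blocks are expected:
 *
 *	static int my_cb(struct qeth_card *card, struct qeth_reply *reply,
 *			 unsigned long data)
 *	{
 *		struct qeth_cmd_buffer *iob = (struct qeth_cmd_buffer *)data;
 *
 *		memcpy(reply->param, iob->data, QETH_MPC_TOKEN_LENGTH);
 *		return 0;
 *	}
 *
 *	rc = qeth_send_control_data(card, iob, my_cb, &my_result);
 */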
2141
2142struct qeth_node_desc {
2143 struct node_descriptor nd1;
2144 struct node_descriptor nd2;
2145 struct node_descriptor nd3;
2146};
2147
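/*
 * qeth_read_conf_data_cb() below parses the three node descriptors from
 * the RCD response: an EBCDIC plant code of "VM" marks a z/VM NIC, the
 * tag bytes yield the CHPID and unit addresses, and EBCDIC model digits
 * "001" through "004" (0xF0 0xF0 0xF1..0xF4) select the V1
 * blocking-timer defaults.
 */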
2148static void qeth_read_conf_data_cb(struct qeth_card *card,
2149 struct qeth_cmd_buffer *iob,
2150 unsigned int data_length)
2151{
2152 struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
2153 int rc = 0;
2154 u8 *tag;
2155
2156 QETH_CARD_TEXT(card, 2, "cfgunit");
2157
2158 if (data_length < sizeof(*nd)) {
2159 rc = -EINVAL;
2160 goto out;
2161 }
2162
2163 card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
2164 nd->nd1.plant[1] == _ascebc['M'];
2165 tag = (u8 *)&nd->nd1.tag;
2166 card->info.chpid = tag[0];
2167 card->info.unit_addr2 = tag[1];
2168
2169 tag = (u8 *)&nd->nd2.tag;
2170 card->info.cula = tag[1];
2171
2172 card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
2173 nd->nd3.model[1] == 0xF0 &&
2174 nd->nd3.model[2] >= 0xF1 &&
2175 nd->nd3.model[2] <= 0xF4;
2176
2177out:
2178 qeth_notify_cmd(iob, rc);
2179 qeth_put_cmd(iob);
2180}
2181
2182static int qeth_read_conf_data(struct qeth_card *card)
2183{
2184 struct qeth_channel *channel = &card->data;
2185 struct qeth_cmd_buffer *iob;
2186 struct ciw *ciw;
2187
2188 /* scan for RCD command in extended SenseID data */
2189 ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
2190 if (!ciw || ciw->cmd == 0)
2191 return -EOPNOTSUPP;
2192 if (ciw->count < sizeof(struct qeth_node_desc))
2193 return -EINVAL;
2194
2195 iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
2196 if (!iob)
2197 return -ENOMEM;
2198
2199 iob->callback = qeth_read_conf_data_cb;
2200 qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
2201 iob->data);
2202
2203 return qeth_send_control_data(card, iob, NULL, NULL);
2204}
2205
2206static int qeth_idx_check_activate_response(struct qeth_card *card,
2207 struct qeth_channel *channel,
2208 struct qeth_cmd_buffer *iob)
2209{
2210 int rc;
2211
2212 rc = qeth_check_idx_response(card, iob->data);
2213 if (rc)
2214 return rc;
2215
2216 if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
2217 return 0;
2218
2219 /* negative reply: */
2220 QETH_CARD_TEXT_(card, 2, "idxneg%c",
2221 QETH_IDX_ACT_CAUSE_CODE(iob->data));
2222
2223 switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
2224 case QETH_IDX_ACT_ERR_EXCL:
2225 dev_err(&channel->ccwdev->dev,
2226 "The adapter is used exclusively by another host\n");
2227 return -EBUSY;
2228 case QETH_IDX_ACT_ERR_AUTH:
2229 case QETH_IDX_ACT_ERR_AUTH_USER:
2230 dev_err(&channel->ccwdev->dev,
2231 "Setting the device online failed because of insufficient authorization\n");
2232 return -EPERM;
2233 default:
2234 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
2235 CCW_DEVID(channel->ccwdev));
2236 return -EIO;
2237 }
2238}
2239
2240static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
2241 struct qeth_cmd_buffer *iob,
2242 unsigned int data_length)
2243{
2244 struct qeth_channel *channel = iob->channel;
2245 u16 peer_level;
2246 int rc;
2247
2248 QETH_CARD_TEXT(card, 2, "idxrdcb");
2249
2250 rc = qeth_idx_check_activate_response(card, channel, iob);
2251 if (rc)
2252 goto out;
2253
2254 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2255 if (peer_level != qeth_peer_func_level(card->info.func_level)) {
2256 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2257 CCW_DEVID(channel->ccwdev),
2258 card->info.func_level, peer_level);
2259 rc = -EINVAL;
2260 goto out;
2261 }
2262
2263 memcpy(&card->token.issuer_rm_r,
2264 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2265 QETH_MPC_TOKEN_LENGTH);
2266 memcpy(&card->info.mcl_level[0],
2267 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
2268
2269out:
2270 qeth_notify_cmd(iob, rc);
2271 qeth_put_cmd(iob);
2272}
2273
2274static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
2275 struct qeth_cmd_buffer *iob,
2276 unsigned int data_length)
2277{
2278 struct qeth_channel *channel = iob->channel;
2279 u16 peer_level;
2280 int rc;
2281
2282 QETH_CARD_TEXT(card, 2, "idxwrcb");
2283
2284 rc = qeth_idx_check_activate_response(card, channel, iob);
2285 if (rc)
2286 goto out;
2287
2288 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2289 if ((peer_level & ~0x0100) !=
2290 qeth_peer_func_level(card->info.func_level)) {
2291 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2292 CCW_DEVID(channel->ccwdev),
2293 card->info.func_level, peer_level);
2294 rc = -EINVAL;
2295 }
2296
2297out:
2298 qeth_notify_cmd(iob, rc);
2299 qeth_put_cmd(iob);
2300}
2301
2302static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
2303 struct qeth_cmd_buffer *iob)
2304{
2305 u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
2306 u8 port = ((u8)card->dev->dev_port) | 0x80;
2307 struct ccw1 *ccw = __ccw_from_cmd(iob);
2308 struct ccw_dev_id dev_id;
2309
2310 qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
2311 iob->data);
2312 qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
2313 ccw_device_get_id(CARD_DDEV(card), &dev_id);
2314 iob->finalize = qeth_idx_finalize_cmd;
2315
2316 port |= QETH_IDX_ACT_INVAL_FRAME;
2317 memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
2318 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2319 &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
2320 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
2321 &card->info.func_level, 2);
2322 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
2323 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
2324}
2325
2326static int qeth_idx_activate_read_channel(struct qeth_card *card)
2327{
2328 struct qeth_channel *channel = &card->read;
2329 struct qeth_cmd_buffer *iob;
2330 int rc;
2331
2332 QETH_CARD_TEXT(card, 2, "idxread");
2333
2334 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2335 if (!iob)
2336 return -ENOMEM;
2337
2338 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
2339 qeth_idx_setup_activate_cmd(card, iob);
2340 iob->callback = qeth_idx_activate_read_channel_cb;
2341
2342 rc = qeth_send_control_data(card, iob, NULL, NULL);
2343 if (rc)
2344 return rc;
2345
2346 channel->state = CH_STATE_UP;
2347 return 0;
2348}
2349
2350static int qeth_idx_activate_write_channel(struct qeth_card *card)
2351{
2352 struct qeth_channel *channel = &card->write;
2353 struct qeth_cmd_buffer *iob;
2354 int rc;
2355
2356 QETH_CARD_TEXT(card, 2, "idxwrite");
2357
2358 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2359 if (!iob)
2360 return -ENOMEM;
2361
2362 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
2363 qeth_idx_setup_activate_cmd(card, iob);
2364 iob->callback = qeth_idx_activate_write_channel_cb;
2365
2366 rc = qeth_send_control_data(card, iob, NULL, NULL);
2367 if (rc)
2368 return rc;
2369
2370 channel->state = CH_STATE_UP;
2371 return 0;
2372}
2373
2374static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2375 unsigned long data)
2376{
2377 struct qeth_cmd_buffer *iob;
2378
2379 QETH_CARD_TEXT(card, 2, "cmenblcb");
2380
2381 iob = (struct qeth_cmd_buffer *) data;
2382 memcpy(&card->token.cm_filter_r,
2383 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2384 QETH_MPC_TOKEN_LENGTH);
2385 return 0;
2386}
2387
2388static int qeth_cm_enable(struct qeth_card *card)
2389{
2390 struct qeth_cmd_buffer *iob;
2391
2392 QETH_CARD_TEXT(card, 2, "cmenable");
2393
2394 iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
2395 if (!iob)
2396 return -ENOMEM;
2397
2398 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2399 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2400 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2401 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2402
2403 return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
2404}
2405
2406static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2407 unsigned long data)
2408{
2409 struct qeth_cmd_buffer *iob;
2410
2411 QETH_CARD_TEXT(card, 2, "cmsetpcb");
2412
2413 iob = (struct qeth_cmd_buffer *) data;
2414 memcpy(&card->token.cm_connection_r,
2415 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2416 QETH_MPC_TOKEN_LENGTH);
2417 return 0;
2418}
2419
2420static int qeth_cm_setup(struct qeth_card *card)
2421{
2422 struct qeth_cmd_buffer *iob;
2423
2424 QETH_CARD_TEXT(card, 2, "cmsetup");
2425
2426 iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
2427 if (!iob)
2428 return -ENOMEM;
2429
2430 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2431 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2432 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2433 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2434 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2435 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2436 return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
2437}
2438
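/*
 * Propagate the transport-reported maximum MTU to the net_device. On
 * IQD the maximum MTU also sizes the RX buffers, so a changed value
 * forces the QDIO queues to be freed and rebuilt; a device still at
 * its default MTU is moved to the new maximum, while an explicitly
 * configured MTU is preserved.
 */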
2439static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
2440{
2441 struct net_device *dev = card->dev;
2442 unsigned int new_mtu;
2443
2444 if (!max_mtu) {
2445 /* IQD needs accurate max MTU to set up its RX buffers: */
2446 if (IS_IQD(card))
2447 return -EINVAL;
2448 /* tolerate quirky HW: */
2449 max_mtu = ETH_MAX_MTU;
2450 }
2451
2452 rtnl_lock();
2453 if (IS_IQD(card)) {
2454 /* move any device with default MTU to new max MTU: */
2455 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
2456
2457 /* adjust RX buffer size to new max MTU: */
2458 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
2459 if (dev->max_mtu && dev->max_mtu != max_mtu)
2460 qeth_free_qdio_queues(card);
2461 } else {
2462 if (dev->mtu)
2463 new_mtu = dev->mtu;
2464 /* default MTUs for first setup: */
2465 else if (IS_LAYER2(card))
2466 new_mtu = ETH_DATA_LEN;
2467 else
2468 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
2469 }
2470
2471 dev->max_mtu = max_mtu;
2472 dev->mtu = min(new_mtu, max_mtu);
2473 rtnl_unlock();
2474 return 0;
2475}
2476
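/*
 * Map the HiperSockets frame size from the ULP_ENABLE response to a
 * usable MTU. The pattern appears to be "frame size minus 8 KiB of
 * buffer overhead", with 0xffff denoting a 64 KiB frame: e.g.
 * 0x6000 = 24 KiB, and 24 KiB - 8 KiB = 16384.
 */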
2477static int qeth_get_mtu_outof_framesize(int framesize)
2478{
2479 switch (framesize) {
2480 case 0x4000:
2481 return 8192;
2482 case 0x6000:
2483 return 16384;
2484 case 0xa000:
2485 return 32768;
2486 case 0xffff:
2487 return 57344;
2488 default:
2489 return 0;
2490 }
2491}
2492
2493static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2494 unsigned long data)
2495{
2496 __u16 mtu, framesize;
2497 __u16 len;
2498 __u8 link_type;
2499 struct qeth_cmd_buffer *iob;
2500
2501 QETH_CARD_TEXT(card, 2, "ulpenacb");
2502
2503 iob = (struct qeth_cmd_buffer *) data;
2504 memcpy(&card->token.ulp_filter_r,
2505 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2506 QETH_MPC_TOKEN_LENGTH);
2507 if (IS_IQD(card)) {
2508 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2509 mtu = qeth_get_mtu_outof_framesize(framesize);
2510 } else {
2511 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
2512 }
2513 *(u16 *)reply->param = mtu;
2514
2515 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2516 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2517 memcpy(&link_type,
2518 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2519 card->info.link_type = link_type;
2520 } else
2521 card->info.link_type = 0;
2522 QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
2523 return 0;
2524}
2525
2526static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2527{
2528 if (IS_OSN(card))
2529 return QETH_PROT_OSN2;
2530 return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
2531}
2532
2533static int qeth_ulp_enable(struct qeth_card *card)
2534{
2535 u8 prot_type = qeth_mpc_select_prot_type(card);
2536 struct qeth_cmd_buffer *iob;
2537 u16 max_mtu;
2538 int rc;
2539
2540 QETH_CARD_TEXT(card, 2, "ulpenabl");
2541
2542 iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
2543 if (!iob)
2544 return -ENOMEM;
2545
2546 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
2547 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2548 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2549 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2550 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2551 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2552 rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
2553 if (rc)
2554 return rc;
2555 return qeth_update_max_mtu(card, max_mtu);
2556}
2557
2558static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2559 unsigned long data)
2560{
2561 struct qeth_cmd_buffer *iob;
2562
2563 QETH_CARD_TEXT(card, 2, "ulpstpcb");
2564
2565 iob = (struct qeth_cmd_buffer *) data;
2566 memcpy(&card->token.ulp_connection_r,
2567 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2568 QETH_MPC_TOKEN_LENGTH);
2569 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2570 3)) {
2571 QETH_CARD_TEXT(card, 2, "olmlimit");
2572 dev_err(&card->gdev->dev,
2573 "A connection could not be established because of an OLM limit\n");
2574 return -EMLINK;
2575 }
2576 return 0;
2577}
2578
2579static int qeth_ulp_setup(struct qeth_card *card)
2580{
2581 __u16 temp;
2582 struct qeth_cmd_buffer *iob;
2583 struct ccw_dev_id dev_id;
2584
2585 QETH_CARD_TEXT(card, 2, "ulpsetup");
2586
2587 iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2588 if (!iob)
2589 return -ENOMEM;
2590
2591 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2592 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2593 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2594 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2595 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2596 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2597
2598 ccw_device_get_id(CARD_DDEV(card), &dev_id);
2599 memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
2600 temp = (card->info.cula << 8) + card->info.unit_addr2;
2601 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2602 return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
2603}
2604
2605static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
2606{
2607 struct qeth_qdio_out_buffer *newbuf;
2608
2609 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
2610 if (!newbuf)
2611 return -ENOMEM;
2612
2613 newbuf->buffer = q->qdio_bufs[bidx];
2614 skb_queue_head_init(&newbuf->skb_list);
2615 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2616 newbuf->q = q;
2617 newbuf->next_pending = q->bufs[bidx];
2618 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2619 q->bufs[bidx] = newbuf;
2620 return 0;
2621}
2622
2623static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2624{
2625 if (!q)
2626 return;
2627
2628 qeth_drain_output_queue(q, true);
2629 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2630 kfree(q);
2631}
2632
2633static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2634{
2635 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2636
2637 if (!q)
2638 return NULL;
2639
2640 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
2641 kfree(q);
2642 return NULL;
2643 }
2644 return q;
2645}
2646
2647static void qeth_tx_completion_timer(struct timer_list *timer)
2648{
2649 struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
2650
2651 napi_schedule(&queue->napi);
2652 QETH_TXQ_STAT_INC(queue, completion_timer);
2653}
2654
2655static int qeth_alloc_qdio_queues(struct qeth_card *card)
2656{
2657 int i, j;
2658
2659 QETH_CARD_TEXT(card, 2, "allcqdbf");
2660
2661 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2662 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2663 return 0;
2664
2665 QETH_CARD_TEXT(card, 2, "inq");
2666 card->qdio.in_q = qeth_alloc_qdio_queue();
2667 if (!card->qdio.in_q)
2668 goto out_nomem;
2669
2670 /* inbound buffer pool */
2671 if (qeth_alloc_buffer_pool(card))
2672 goto out_freeinq;
2673
2674 /* outbound */
2675 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2676 struct qeth_qdio_out_q *queue;
2677
2678 queue = qeth_alloc_output_queue();
2679 if (!queue)
2680 goto out_freeoutq;
2681 QETH_CARD_TEXT_(card, 2, "outq %i", i);
2682 QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
2683 card->qdio.out_qs[i] = queue;
2684 queue->card = card;
2685 queue->queue_no = i;
2686 timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2687 queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
2688 queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
2689
2690 /* give outbound qeth_qdio_buffers their qdio_buffers */
2691 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2692 WARN_ON(queue->bufs[j]);
2693 if (qeth_init_qdio_out_buf(queue, j))
2694 goto out_freeoutqbufs;
2695 }
2696 }
2697
2698 /* completion */
2699 if (qeth_alloc_cq(card))
2700 goto out_freeoutq;
2701
2702 return 0;
2703
2704out_freeoutqbufs:
2705 while (j > 0) {
2706 --j;
2707 kmem_cache_free(qeth_qdio_outbuf_cache,
2708 card->qdio.out_qs[i]->bufs[j]);
2709 card->qdio.out_qs[i]->bufs[j] = NULL;
2710 }
2711out_freeoutq:
2712 while (i > 0) {
2713 qeth_free_output_queue(card->qdio.out_qs[--i]);
2714 card->qdio.out_qs[i] = NULL;
2715 }
2716 qeth_free_buffer_pool(card);
2717out_freeinq:
2718 qeth_free_qdio_queue(card->qdio.in_q);
2719 card->qdio.in_q = NULL;
2720out_nomem:
2721 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2722 return -ENOMEM;
2723}
2724
2725static void qeth_free_qdio_queues(struct qeth_card *card)
2726{
2727 int i, j;
2728
2729 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2730 QETH_QDIO_UNINITIALIZED)
2731 return;
2732
2733 qeth_free_cq(card);
2734 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2735 if (card->qdio.in_q->bufs[j].rx_skb)
2736 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2737 }
2738 qeth_free_qdio_queue(card->qdio.in_q);
2739 card->qdio.in_q = NULL;
2740 /* inbound buffer pool */
2741 qeth_free_buffer_pool(card);
2742 /* free outbound qdio_qs */
2743 for (i = 0; i < card->qdio.no_out_queues; i++) {
2744 qeth_free_output_queue(card->qdio.out_qs[i]);
2745 card->qdio.out_qs[i] = NULL;
2746 }
2747}
2748
2749static void qeth_create_qib_param_field(struct qeth_card *card,
2750 char *param_field)
2751{
2752
2753 param_field[0] = _ascebc['P'];
2754 param_field[1] = _ascebc['C'];
2755 param_field[2] = _ascebc['I'];
2756 param_field[3] = _ascebc['T'];
2757 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2758 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2759 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2760}
2761
2762static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2763 char *param_field)
2764{
2765 param_field[16] = _ascebc['B'];
2766 param_field[17] = _ascebc['L'];
2767 param_field[18] = _ascebc['K'];
2768 param_field[19] = _ascebc['T'];
2769 *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2770 *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2771 *((unsigned int *) (&param_field[28])) =
2772 card->info.blkt.inter_packet_jumbo;
2773}
2774
2775static int qeth_qdio_activate(struct qeth_card *card)
2776{
2777 QETH_CARD_TEXT(card, 3, "qdioact");
2778 return qdio_activate(CARD_DDEV(card));
2779}
2780
2781static int qeth_dm_act(struct qeth_card *card)
2782{
2783 struct qeth_cmd_buffer *iob;
2784
2785 QETH_CARD_TEXT(card, 2, "dmact");
2786
2787 iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2788 if (!iob)
2789 return -ENOMEM;
2790
2791 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2792 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2793 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2794 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2795 return qeth_send_control_data(card, iob, NULL, NULL);
2796}
2797
2798static int qeth_mpc_initialize(struct qeth_card *card)
2799{
2800 int rc;
2801
2802 QETH_CARD_TEXT(card, 2, "mpcinit");
2803
2804 rc = qeth_issue_next_read(card);
2805 if (rc) {
2806 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2807 return rc;
2808 }
2809 rc = qeth_cm_enable(card);
2810 if (rc) {
2811 QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2812 return rc;
2813 }
2814 rc = qeth_cm_setup(card);
2815 if (rc) {
2816 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2817 return rc;
2818 }
2819 rc = qeth_ulp_enable(card);
2820 if (rc) {
2821 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2822 return rc;
2823 }
2824 rc = qeth_ulp_setup(card);
2825 if (rc) {
2826 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2827 return rc;
2828 }
2829 rc = qeth_alloc_qdio_queues(card);
2830 if (rc) {
2831 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2832 return rc;
2833 }
2834 rc = qeth_qdio_establish(card);
2835 if (rc) {
2836 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2837 qeth_free_qdio_queues(card);
2838 return rc;
2839 }
2840 rc = qeth_qdio_activate(card);
2841 if (rc) {
2842 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2843 return rc;
2844 }
2845 rc = qeth_dm_act(card);
2846 if (rc) {
2847 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2848 return rc;
2849 }
2850
2851 return 0;
2852}
2853
2854void qeth_print_status_message(struct qeth_card *card)
2855{
2856 switch (card->info.type) {
2857 case QETH_CARD_TYPE_OSD:
2858 case QETH_CARD_TYPE_OSM:
2859 case QETH_CARD_TYPE_OSX:
2860 /* VM will use a non-zero first character to indicate
2861 * a HiperSockets-like reporting of the level.
2862 * OSA sets the first character to zero.
2863 */
2864 if (!card->info.mcl_level[0]) {
2865 sprintf(card->info.mcl_level, "%02x%02x",
2866 card->info.mcl_level[2],
2867 card->info.mcl_level[3]);
2868 break;
2869 }
2870 /* fallthrough */
2871 case QETH_CARD_TYPE_IQD:
2872 if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
2873 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2874 card->info.mcl_level[0]];
2875 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2876 card->info.mcl_level[1]];
2877 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2878 card->info.mcl_level[2]];
2879 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2880 card->info.mcl_level[3]];
2881 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2882 }
2883 break;
2884 default:
2885 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2886 }
2887 dev_info(&card->gdev->dev,
2888 "Device is a%s card%s%s%s\nwith link type %s.\n",
2889 qeth_get_cardname(card),
2890 (card->info.mcl_level[0]) ? " (level: " : "",
2891 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2892 (card->info.mcl_level[0]) ? ")" : "",
2893 qeth_get_cardname_short(card));
2894}
2895EXPORT_SYMBOL_GPL(qeth_print_status_message);
2896
2897static void qeth_initialize_working_pool_list(struct qeth_card *card)
2898{
2899 struct qeth_buffer_pool_entry *entry;
2900
2901 QETH_CARD_TEXT(card, 5, "inwrklst");
2902
2903 list_for_each_entry(entry,
2904 &card->qdio.init_pool.entry_list, init_list) {
2905 qeth_put_buffer_pool_entry(card, entry);
2906 }
2907}
2908
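/*
 * A pool entry is only reusable once the stack has dropped all
 * references to its pages: page_count() > 1 means some skb still points
 * into a page, so such entries are skipped. If every entry is busy, the
 * first one has its referenced pages swapped for freshly allocated ones.
 */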
2909static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2910 struct qeth_card *card)
2911{
2912 struct qeth_buffer_pool_entry *entry;
2913 int i, free;
2914
2915 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2916 return NULL;
2917
2918 list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
2919 free = 1;
2920 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2921 if (page_count(entry->elements[i]) > 1) {
2922 free = 0;
2923 break;
2924 }
2925 }
2926 if (free) {
2927 list_del_init(&entry->list);
2928 return entry;
2929 }
2930 }
2931
2932 /* no free buffer in pool so take first one and swap pages */
2933 entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
2934 struct qeth_buffer_pool_entry, list);
2935 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2936 if (page_count(entry->elements[i]) > 1) {
2937 struct page *page = dev_alloc_page();
2938
2939 if (!page)
2940 return NULL;
2941
2942 __free_page(entry->elements[i]);
2943 entry->elements[i] = page;
2944 QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2945 }
2946 }
2947 list_del_init(&entry->list);
2948 return entry;
2949}
2950
2951static int qeth_init_input_buffer(struct qeth_card *card,
2952 struct qeth_qdio_buffer *buf)
2953{
2954 struct qeth_buffer_pool_entry *pool_entry;
2955 int i;
2956
2957 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2958 buf->rx_skb = netdev_alloc_skb(card->dev,
2959 ETH_HLEN +
2960 sizeof(struct ipv6hdr));
2961 if (!buf->rx_skb)
2962 return -ENOMEM;
2963 }
2964
2965 pool_entry = qeth_find_free_buffer_pool_entry(card);
2966 if (!pool_entry)
2967 return -ENOBUFS;
2968
2969 /*
2970 * Since the buffer is accessed only from the input tasklet,
2971 * there shouldn't be a need to synchronize; also, since we use
2972 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2973 * buffers.
2974 */
2975
2976 buf->pool_entry = pool_entry;
2977 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2978 buf->buffer->element[i].length = PAGE_SIZE;
2979 buf->buffer->element[i].addr =
2980 page_to_phys(pool_entry->elements[i]);
2981 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2982 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2983 else
2984 buf->buffer->element[i].eflags = 0;
2985 buf->buffer->element[i].sflags = 0;
2986 }
2987 return 0;
2988}
2989
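/*
 * TX bulking flushes several buffers with a single SIGA. The limit is
 * taken from the device's "multiple write count" (mmwc) in the SSQD;
 * multicast queues, CQ mode and non-IQD devices fall back to flushing
 * one buffer per doorbell.
 */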
2990static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
2991 struct qeth_qdio_out_q *queue)
2992{
2993 if (!IS_IQD(card) ||
2994 qeth_iqd_is_mcast_queue(card, queue) ||
2995 card->options.cq == QETH_CQ_ENABLED ||
2996 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
2997 return 1;
2998
2999 return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
3000}
3001
3002static int qeth_init_qdio_queues(struct qeth_card *card)
3003{
3004 unsigned int i;
3005 int rc;
3006
3007 QETH_CARD_TEXT(card, 2, "initqdqs");
3008
3009 /* inbound queue */
3010 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3011 memset(&card->rx, 0, sizeof(struct qeth_rx));
3012
3013 qeth_initialize_working_pool_list(card);
3014 /* give only as many buffers to hardware as we have buffer pool entries */
3015 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) {
3016 rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3017 if (rc)
3018 return rc;
3019 }
3020
3021 card->qdio.in_q->next_buf_to_init =
3022 card->qdio.in_buf_pool.buf_count - 1;
3023 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
3024 card->qdio.in_buf_pool.buf_count - 1);
3025 if (rc) {
3026 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
3027 return rc;
3028 }
3029
3030 /* completion */
3031 rc = qeth_cq_init(card);
3032 if (rc) {
3033 return rc;
3034 }
3035
3036 /* outbound queue */
3037 for (i = 0; i < card->qdio.no_out_queues; ++i) {
3038 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
3039
3040 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3041 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
3042 queue->next_buf_to_fill = 0;
3043 queue->do_pack = 0;
3044 queue->prev_hdr = NULL;
3045 queue->coalesced_frames = 0;
3046 queue->bulk_start = 0;
3047 queue->bulk_count = 0;
3048 queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
3049 atomic_set(&queue->used_buffers, 0);
3050 atomic_set(&queue->set_pci_flags_count, 0);
3051 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3052 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
3053 }
3054 return 0;
3055}
3056
3057static void qeth_ipa_finalize_cmd(struct qeth_card *card,
3058 struct qeth_cmd_buffer *iob)
3059{
3060 qeth_mpc_finalize_cmd(card, iob);
3061
3062 /* override with IPA-specific values: */
3063 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
3064}
3065
3066void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3067 u16 cmd_length,
3068 bool (*match)(struct qeth_cmd_buffer *iob,
3069 struct qeth_cmd_buffer *reply))
3070{
3071 u8 prot_type = qeth_mpc_select_prot_type(card);
3072 u16 total_length = iob->length;
3073
3074 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
3075 iob->data);
3076 iob->finalize = qeth_ipa_finalize_cmd;
3077 iob->match = match;
3078
3079 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
3080 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
3081 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
3082 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
3083 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
3084 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
3085 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3086 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
3087}
3088EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
3089
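/*
 * Unlike MPC commands, which are matched purely by their strict
 * ordering (see qeth_mpc_match_reply()), IPA replies echo the seqno
 * from the request header, so a reply can be matched to its command
 * even when other traffic is interleaved on the READ channel.
 */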
3090static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
3091 struct qeth_cmd_buffer *reply)
3092{
3093 struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
3094
3095 return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
3096}
3097
3098struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
3099 enum qeth_ipa_cmds cmd_code,
3100 enum qeth_prot_versions prot,
3101 unsigned int data_length)
3102{
3103 enum qeth_link_types link_type = card->info.link_type;
3104 struct qeth_cmd_buffer *iob;
3105 struct qeth_ipacmd_hdr *hdr;
3106
3107 data_length += offsetof(struct qeth_ipa_cmd, data);
3108 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
3109 QETH_IPA_TIMEOUT);
3110 if (!iob)
3111 return NULL;
3112
3113 qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);
3114
3115 hdr = &__ipa_cmd(iob)->hdr;
3116 hdr->command = cmd_code;
3117 hdr->initiator = IPA_CMD_INITIATOR_HOST;
3118 /* hdr->seqno is set by qeth_send_control_data() */
3119 hdr->adapter_type = (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1;
3120 hdr->rel_adapter_no = (u8) card->dev->dev_port;
3121 hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
3122 hdr->param_count = 1;
3123 hdr->prot_version = prot;
3124 return iob;
3125}
3126EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
3127
3128static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
3129 struct qeth_reply *reply, unsigned long data)
3130{
3131 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3132
3133 return (cmd->hdr.return_code) ? -EIO : 0;
3134}
3135
3136/**
3137 * qeth_send_ipa_cmd() - send an IPA command
3138 *
3139 * See qeth_send_control_data() for explanation of the arguments.
3140 */
3141
3142int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3143 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
3144 unsigned long),
3145 void *reply_param)
3146{
3147 int rc;
3148
3149 QETH_CARD_TEXT(card, 4, "sendipa");
3150
3151 if (card->read_or_write_problem) {
3152 qeth_put_cmd(iob);
3153 return -EIO;
3154 }
3155
3156 if (reply_cb == NULL)
3157 reply_cb = qeth_send_ipa_cmd_cb;
3158 rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
3159 if (rc == -ETIME) {
3160 qeth_clear_ipacmd_list(card);
3161 qeth_schedule_recovery(card);
3162 }
3163 return rc;
3164}
3165EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
3166
3167static int qeth_send_startlan_cb(struct qeth_card *card,
3168 struct qeth_reply *reply, unsigned long data)
3169{
3170 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3171
3172 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
3173 return -ENETDOWN;
3174
3175 return (cmd->hdr.return_code) ? -EIO : 0;
3176}
3177
3178static int qeth_send_startlan(struct qeth_card *card)
3179{
3180 struct qeth_cmd_buffer *iob;
3181
3182 QETH_CARD_TEXT(card, 2, "strtlan");
3183
3184 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
3185 if (!iob)
3186 return -ENOMEM;
3187 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
3188}
3189
3190static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
3191{
3192 if (!cmd->hdr.return_code)
3193 cmd->hdr.return_code =
3194 cmd->data.setadapterparms.hdr.return_code;
3195 return cmd->hdr.return_code;
3196}
3197
3198static int qeth_query_setadapterparms_cb(struct qeth_card *card,
3199 struct qeth_reply *reply, unsigned long data)
3200{
3201 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3202
3203 QETH_CARD_TEXT(card, 3, "quyadpcb");
3204 if (qeth_setadpparms_inspect_rc(cmd))
3205 return -EIO;
3206
3207 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
3208 card->info.link_type =
3209 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
3210 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
3211 }
3212 card->options.adp.supported =
3213 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
3214 return 0;
3215}
3216
3217static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3218 enum qeth_ipa_setadp_cmd adp_cmd,
3219 unsigned int data_length)
3220{
3221 struct qeth_ipacmd_setadpparms_hdr *hdr;
3222 struct qeth_cmd_buffer *iob;
3223
3224 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
3225 data_length +
3226 offsetof(struct qeth_ipacmd_setadpparms,
3227 data));
3228 if (!iob)
3229 return NULL;
3230
3231 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
3232 hdr->cmdlength = sizeof(*hdr) + data_length;
3233 hdr->command_code = adp_cmd;
3234 hdr->used_total = 1;
3235 hdr->seq_no = 1;
3236 return iob;
3237}
3238
3239static int qeth_query_setadapterparms(struct qeth_card *card)
3240{
3241 int rc;
3242 struct qeth_cmd_buffer *iob;
3243
3244 QETH_CARD_TEXT(card, 3, "queryadp");
3245 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3246 SETADP_DATA_SIZEOF(query_cmds_supp));
3247 if (!iob)
3248 return -ENOMEM;
3249 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
3250 return rc;
3251}
3252
3253static int qeth_query_ipassists_cb(struct qeth_card *card,
3254 struct qeth_reply *reply, unsigned long data)
3255{
3256 struct qeth_ipa_cmd *cmd;
3257
3258 QETH_CARD_TEXT(card, 2, "qipasscb");
3259
3260 cmd = (struct qeth_ipa_cmd *) data;
3261
3262 switch (cmd->hdr.return_code) {
3263 case IPA_RC_SUCCESS:
3264 break;
3265 case IPA_RC_NOTSUPP:
3266 case IPA_RC_L2_UNSUPPORTED_CMD:
3267 QETH_CARD_TEXT(card, 2, "ipaunsup");
3268 card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
3269 card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
3270 return -EOPNOTSUPP;
3271 default:
3272 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
3273 CARD_DEVID(card), cmd->hdr.return_code);
3274 return -EIO;
3275 }
3276
3277 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
3278 card->options.ipa4 = cmd->hdr.assists;
3279 else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
3280 card->options.ipa6 = cmd->hdr.assists;
3281 else
3282 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3283 CARD_DEVID(card));
3284 return 0;
3285}
3286
3287static int qeth_query_ipassists(struct qeth_card *card,
3288 enum qeth_prot_versions prot)
3289{
3290 int rc;
3291 struct qeth_cmd_buffer *iob;
3292
3293 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
3294 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
3295 if (!iob)
3296 return -ENOMEM;
3297 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3298 return rc;
3299}
3300
3301static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3302 struct qeth_reply *reply, unsigned long data)
3303{
3304 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3305 struct qeth_query_switch_attributes *attrs;
3306 struct qeth_switch_info *sw_info;
3307
3308 QETH_CARD_TEXT(card, 2, "qswiatcb");
3309 if (qeth_setadpparms_inspect_rc(cmd))
3310 return -EIO;
3311
3312 sw_info = (struct qeth_switch_info *)reply->param;
3313 attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3314 sw_info->capabilities = attrs->capabilities;
3315 sw_info->settings = attrs->settings;
3316 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3317 sw_info->settings);
3318 return 0;
3319}
3320
3321int qeth_query_switch_attributes(struct qeth_card *card,
3322 struct qeth_switch_info *sw_info)
3323{
3324 struct qeth_cmd_buffer *iob;
3325
3326 QETH_CARD_TEXT(card, 2, "qswiattr");
3327 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
3328 return -EOPNOTSUPP;
3329 if (!netif_carrier_ok(card->dev))
3330 return -ENOMEDIUM;
3331 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
3332 if (!iob)
3333 return -ENOMEM;
3334 return qeth_send_ipa_cmd(card, iob,
3335 qeth_query_switch_attributes_cb, sw_info);
3336}
3337
3338struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
3339 enum qeth_diags_cmds sub_cmd,
3340 unsigned int data_length)
3341{
3342 struct qeth_ipacmd_diagass *cmd;
3343 struct qeth_cmd_buffer *iob;
3344
3345 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
3346 DIAG_HDR_LEN + data_length);
3347 if (!iob)
3348 return NULL;
3349
3350 cmd = &__ipa_cmd(iob)->data.diagass;
3351 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
3352 cmd->subcmd = sub_cmd;
3353 return iob;
3354}
3355EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
3356
3357static int qeth_query_setdiagass_cb(struct qeth_card *card,
3358 struct qeth_reply *reply, unsigned long data)
3359{
3360 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3361 u16 rc = cmd->hdr.return_code;
3362
3363 if (rc) {
3364 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3365 return -EIO;
3366 }
3367
3368 card->info.diagass_support = cmd->data.diagass.ext;
3369 return 0;
3370}
3371
3372static int qeth_query_setdiagass(struct qeth_card *card)
3373{
3374 struct qeth_cmd_buffer *iob;
3375
3376 QETH_CARD_TEXT(card, 2, "qdiagass");
3377 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3378 if (!iob)
3379 return -ENOMEM;
3380 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
3381}
3382
3383static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3384{
3385 unsigned long info = get_zeroed_page(GFP_KERNEL);
3386 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3387 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3388 struct ccw_dev_id ccwid;
3389 int level;
3390
3391 tid->chpid = card->info.chpid;
3392 ccw_device_get_id(CARD_RDEV(card), &ccwid);
3393 tid->ssid = ccwid.ssid;
3394 tid->devno = ccwid.devno;
3395 if (!info)
3396 return;
3397 level = stsi(NULL, 0, 0, 0);
3398 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3399 tid->lparnr = info222->lpar_number;
3400 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3401 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3402 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3403 }
3404 free_page(info);
3405 return;
3406}
3407
3408static int qeth_hw_trap_cb(struct qeth_card *card,
3409 struct qeth_reply *reply, unsigned long data)
3410{
3411 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3412 u16 rc = cmd->hdr.return_code;
3413
3414 if (rc) {
3415 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3416 return -EIO;
3417 }
3418 return 0;
3419}
3420
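/*
 * Arm, disarm or trigger the hardware trap facility. For ARM the trap
 * id (CHPID/ssid/devno plus LPAR number and guest name, gathered by
 * qeth_get_trap_id()) is shipped along, presumably so that a later
 * CAPTURE can be attributed to this device.
 */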
3421int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3422{
3423 struct qeth_cmd_buffer *iob;
3424 struct qeth_ipa_cmd *cmd;
3425
3426 QETH_CARD_TEXT(card, 2, "diagtrap");
3427 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3428 if (!iob)
3429 return -ENOMEM;
3430 cmd = __ipa_cmd(iob);
3431 cmd->data.diagass.type = 1;
3432 cmd->data.diagass.action = action;
3433 switch (action) {
3434 case QETH_DIAGS_TRAP_ARM:
3435 cmd->data.diagass.options = 0x0003;
3436 cmd->data.diagass.ext = 0x00010000 +
3437 sizeof(struct qeth_trap_id);
3438 qeth_get_trap_id(card,
3439 (struct qeth_trap_id *)cmd->data.diagass.cdata);
3440 break;
3441 case QETH_DIAGS_TRAP_DISARM:
3442 cmd->data.diagass.options = 0x0001;
3443 break;
3444 case QETH_DIAGS_TRAP_CAPTURE:
3445 break;
3446 }
3447 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3448}
3449
3450static int qeth_check_qdio_errors(struct qeth_card *card,
3451 struct qdio_buffer *buf,
3452 unsigned int qdio_error,
3453 const char *dbftext)
3454{
3455 if (qdio_error) {
3456 QETH_CARD_TEXT(card, 2, dbftext);
3457 QETH_CARD_TEXT_(card, 2, " F15=%02X",
3458 buf->element[15].sflags);
3459 QETH_CARD_TEXT_(card, 2, " F14=%02X",
3460 buf->element[14].sflags);
3461 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3462 if ((buf->element[15].sflags) == 0x12) {
3463 QETH_CARD_STAT_INC(card, rx_fifo_errors);
3464 return 0;
3465 } else
3466 return 1;
3467 }
3468 return 0;
3469}
3470
3471static void qeth_queue_input_buffer(struct qeth_card *card, int index)
3472{
3473 struct qeth_qdio_q *queue = card->qdio.in_q;
3474 struct list_head *lh;
3475 int count;
3476 int i;
3477 int rc;
3478 int newcount = 0;
3479
3480 count = (index < queue->next_buf_to_init)?
3481 card->qdio.in_buf_pool.buf_count -
3482 (queue->next_buf_to_init - index) :
3483 card->qdio.in_buf_pool.buf_count -
3484 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
3485 /* only requeue at a certain threshold to avoid SIGAs */
3486 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3487 for (i = queue->next_buf_to_init;
3488 i < queue->next_buf_to_init + count; ++i) {
3489 if (qeth_init_input_buffer(card,
3490 &queue->bufs[QDIO_BUFNR(i)])) {
3491 break;
3492 } else {
3493 newcount++;
3494 }
3495 }
3496
3497 if (newcount < count) {
3498 /* we are short on memory, so we switch back to
3499 traditional skb allocation and drop packets */
3500 atomic_set(&card->force_alloc_skb, 3);
3501 count = newcount;
3502 } else {
3503 atomic_add_unless(&card->force_alloc_skb, -1, 0);
3504 }
3505
3506 if (!count) {
3507 i = 0;
3508 list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3509 i++;
3510 if (i == card->qdio.in_buf_pool.buf_count) {
3511 QETH_CARD_TEXT(card, 2, "qsarbw");
3512 card->reclaim_index = index;
3513 schedule_delayed_work(
3514 &card->buffer_reclaim_work,
3515 QETH_RECLAIM_WORK_TIME);
3516 }
3517 return;
3518 }
3519
3520 /*
3521 * According to the old code, requeueing all 128 buffers should be
3522 * avoided in order to benefit from PCI avoidance.
3523 * This function keeps at least one buffer (the buffer at 'index')
3524 * un-requeued; that buffer is the first one that will be
3525 * requeued the next time.
3526 */
3527 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3528 queue->next_buf_to_init, count);
3529 if (rc) {
3530 QETH_CARD_TEXT(card, 2, "qinberr");
3531 }
3532 queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
3533 count);
3534 }
3535}
3536
3537static void qeth_buffer_reclaim_work(struct work_struct *work)
3538{
3539 struct qeth_card *card = container_of(work, struct qeth_card,
3540 buffer_reclaim_work.work);
3541
3542 QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
3543 qeth_queue_input_buffer(card, card->reclaim_index);
3544}
3545
3546static void qeth_handle_send_error(struct qeth_card *card,
3547 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3548{
3549 int sbalf15 = buffer->buffer->element[15].sflags;
3550
3551 QETH_CARD_TEXT(card, 6, "hdsnderr");
3552 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3553
3554 if (!qdio_err)
3555 return;
3556
3557 if ((sbalf15 >= 15) && (sbalf15 <= 31))
3558 return;
3559
3560 QETH_CARD_TEXT(card, 1, "lnkfail");
3561 QETH_CARD_TEXT_(card, 1, "%04x %02x",
3562 (u16)qdio_err, (u8)sbalf15);
3563}
3564
3565/**
3566 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3567 * @queue: queue to check for packing buffer
3568 *
3569 * Returns number of buffers that were prepared for flush.
3570 */
3571static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3572{
3573 struct qeth_qdio_out_buffer *buffer;
3574
3575 buffer = queue->bufs[queue->next_buf_to_fill];
3576 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3577 (buffer->next_element_to_fill > 0)) {
3578 /* it's a packing buffer */
3579 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3580 queue->next_buf_to_fill =
3581 QDIO_BUFNR(queue->next_buf_to_fill + 1);
3582 return 1;
3583 }
3584 return 0;
3585}
3586
3587/*
3588 * Switch to packing state if the number of used buffers on a queue
3589 * reaches a certain limit.
3590 */
3591static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3592{
3593 if (!queue->do_pack) {
3594 if (atomic_read(&queue->used_buffers)
3595 >= QETH_HIGH_WATERMARK_PACK){
3596 /* switch non-PACKING -> PACKING */
3597 QETH_CARD_TEXT(queue->card, 6, "np->pack");
3598 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3599 queue->do_pack = 1;
3600 }
3601 }
3602}
3603
3604/*
3605 * Switches from packing to non-packing mode. If there is a packing
3606 * buffer on the queue, this buffer will be prepared to be flushed.
3607 * In that case 1 is returned to inform the caller. If no buffer
3608 * has to be flushed, zero is returned.
3609 */
3610static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3611{
3612 if (queue->do_pack) {
3613 if (atomic_read(&queue->used_buffers)
3614 <= QETH_LOW_WATERMARK_PACK) {
3615 /* switch PACKING -> non-PACKING */
3616 QETH_CARD_TEXT(queue->card, 6, "pack->np");
3617 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3618 queue->do_pack = 0;
3619 return qeth_prep_flush_pack_buffer(queue);
3620 }
3621 }
3622 return 0;
3623}
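/*
 * Note that the two watermark checks above are deliberately asymmetric
 * (hysteresis): packing starts once QETH_HIGH_WATERMARK_PACK buffers
 * are in use and only stops again at or below QETH_LOW_WATERMARK_PACK,
 * so a queue hovering around a single threshold does not flip modes
 * on every packet.
 */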
3624
3625static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3626 int count)
3627{
3628 struct qeth_qdio_out_buffer *buf = queue->bufs[index];
3629 unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3630 struct qeth_card *card = queue->card;
3631 int rc;
3632 int i;
3633
3634 for (i = index; i < index + count; ++i) {
3635 unsigned int bidx = QDIO_BUFNR(i);
3636 struct sk_buff *skb;
3637
3638 buf = queue->bufs[bidx];
3639 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3640 SBAL_EFLAGS_LAST_ENTRY;
3641 queue->coalesced_frames += buf->frames;
3642
3643 if (queue->bufstates)
3644 queue->bufstates[bidx].user = buf;
3645
3646 if (IS_IQD(card)) {
3647 skb_queue_walk(&buf->skb_list, skb)
3648 skb_tx_timestamp(skb);
3649 }
3650 }
3651
3652 if (!IS_IQD(card)) {
3653 if (!queue->do_pack) {
3654 if ((atomic_read(&queue->used_buffers) >=
3655 (QETH_HIGH_WATERMARK_PACK -
3656 QETH_WATERMARK_PACK_FUZZ)) &&
3657 !atomic_read(&queue->set_pci_flags_count)) {
3658 /* it's likely that we'll go to packing
3659 * mode soon */
3660 atomic_inc(&queue->set_pci_flags_count);
3661 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3662 }
3663 } else {
3664 if (!atomic_read(&queue->set_pci_flags_count)) {
3665 /*
3666 * There's no outstanding PCI request any more, so we
3667 * have to request one to be sure that the PCI interrupt
3668 * will fire at some point in the future. Then we can
3669 * flush packed buffers that might still be hanging
3670 * around, which can happen if no further send was
3671 * requested by the stack.
3672 */
3673 atomic_inc(&queue->set_pci_flags_count);
3674 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3675 }
3676 }
3677
3678 if (atomic_read(&queue->set_pci_flags_count))
3679 qdio_flags |= QDIO_FLAG_PCI_OUT;
3680 }
3681
3682 QETH_TXQ_STAT_INC(queue, doorbell);
3683 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
3684 queue->queue_no, index, count);
3685
3686 /* Fake the TX completion interrupt: */
3687 if (IS_IQD(card)) {
3688 unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
3689 unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
3690
3691 if (frames && queue->coalesced_frames >= frames) {
3692 napi_schedule(&queue->napi);
3693 queue->coalesced_frames = 0;
3694 QETH_TXQ_STAT_INC(queue, coal_frames);
3695 } else if (usecs) {
3696 qeth_tx_arm_timer(queue, usecs);
3697 }
3698 }
3699
3700 if (rc) {
3701 /* ignore temporary SIGA errors without busy condition */
3702 if (rc == -ENOBUFS)
3703 return;
3704 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3705 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3706 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3707 QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3708 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3709
3710 /* This must not happen under normal circumstances. If it
3711 * happens, something is really wrong -> recover. */
3712 qeth_schedule_recovery(queue->card);
3713 return;
3714 }
3715}
3716
3717static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3718{
3719 qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3720
3721 queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3722 queue->prev_hdr = NULL;
3723 queue->bulk_count = 0;
3724}
3725
3726static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3727{
3728 int index;
3729 int flush_cnt = 0;
3730 int q_was_packing = 0;
3731
3732 /*
3733 * check if we have to switch to non-packing mode or if
3734 * we have to get a PCI flag out on the queue
3735 */
3736 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3737 !atomic_read(&queue->set_pci_flags_count)) {
3738 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
3739 QETH_OUT_Q_UNLOCKED) {
3740 /*
3741 * If we get in here, there was no action in
3742 * do_send_packet. So, we check if there is a
3743 * packing buffer to be flushed here.
3744 */
3745 index = queue->next_buf_to_fill;
3746 q_was_packing = queue->do_pack;
3747 /* queue->do_pack may change */
3748 barrier();
3749 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
3750 if (!flush_cnt &&
3751 !atomic_read(&queue->set_pci_flags_count))
3752 flush_cnt += qeth_prep_flush_pack_buffer(queue);
3753 if (q_was_packing)
3754 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3755 if (flush_cnt)
3756 qeth_flush_buffers(queue, index, flush_cnt);
3757 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3758 }
3759 }
3760}
3761
3762static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
3763{
3764 struct qeth_card *card = (struct qeth_card *)card_ptr;
3765
3766 napi_schedule_irqoff(&card->napi);
3767}
3768
3769int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3770{
3771 int rc;
3772
3773 if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
3774 rc = -1;
3775 goto out;
3776 } else {
3777 if (card->options.cq == cq) {
3778 rc = 0;
3779 goto out;
3780 }
3781
3782 qeth_free_qdio_queues(card);
3783 card->options.cq = cq;
3784 rc = 0;
3785 }
3786out:
3787 return rc;
3788
3789}
3790EXPORT_SYMBOL_GPL(qeth_configure_cq);
3791
3792static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3793 unsigned int queue, int first_element,
3794 int count)
3795{
3796 struct qeth_qdio_q *cq = card->qdio.c_q;
3797 int i;
3798 int rc;
3799
3800 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3801 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3802 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3803
3804 if (qdio_err) {
3805 netif_tx_stop_all_queues(card->dev);
3806 qeth_schedule_recovery(card);
3807 return;
3808 }
3809
3810 for (i = first_element; i < first_element + count; ++i) {
3811 struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
3812 int e = 0;
3813
3814 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3815 buffer->element[e].addr) {
3816 unsigned long phys_aob_addr = buffer->element[e].addr;
3817
3818 qeth_qdio_handle_aob(card, phys_aob_addr);
3819 ++e;
3820 }
3821 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3822 }
3823 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3824 card->qdio.c_q->next_buf_to_init,
3825 count);
3826 if (rc) {
3827 dev_warn(&card->gdev->dev,
3828 "QDIO reported an error, rc=%i\n", rc);
3829 QETH_CARD_TEXT(card, 2, "qcqherr");
3830 }
3831
3832 cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
3833}
3834
3835static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3836 unsigned int qdio_err, int queue,
3837 int first_elem, int count,
3838 unsigned long card_ptr)
3839{
3840 struct qeth_card *card = (struct qeth_card *)card_ptr;
3841
3842 QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3843 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3844
3845 if (qdio_err)
3846 qeth_schedule_recovery(card);
3847}
3848
3849static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3850 unsigned int qdio_error, int __queue,
3851 int first_element, int count,
3852 unsigned long card_ptr)
3853{
3854 struct qeth_card *card = (struct qeth_card *) card_ptr;
3855 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3856 struct net_device *dev = card->dev;
3857 struct netdev_queue *txq;
3858 int i;
3859
3860 QETH_CARD_TEXT(card, 6, "qdouhdl");
3861 if (qdio_error & QDIO_ERROR_FATAL) {
3862 QETH_CARD_TEXT(card, 2, "achkcond");
3863 netif_tx_stop_all_queues(dev);
3864 qeth_schedule_recovery(card);
3865 return;
3866 }
3867
3868 for (i = first_element; i < (first_element + count); ++i) {
3869 struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
3870
3871 qeth_handle_send_error(card, buf, qdio_error);
3872 qeth_clear_output_buffer(queue, buf, qdio_error, 0);
3873 }
3874
3875 atomic_sub(count, &queue->used_buffers);
3876 qeth_check_outbound_queue(queue);
3877
3878 txq = netdev_get_tx_queue(dev, __queue);
	/* xmit may have observed the full-condition, but not yet stopped the
	 * txq. In that case the code below won't trigger. So before returning,
	 * xmit will re-check the txq's fill level and wake it up if needed.
	 */
3883 if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
3884 netif_tx_wake_queue(txq);
3885}
3886
/**
 * qeth_get_priority_queue() - select the outbound queue for an skb.
 * @card: qeth_card that the skb will be transmitted on.
 * @skb: skb to select a queue for.
 *
 * Note: Function assumes that we have 4 outbound queues.
 */
3890int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
3891{
3892 struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3893 u8 tos;
3894
3895 switch (card->qdio.do_prio_queueing) {
3896 case QETH_PRIO_Q_ING_TOS:
3897 case QETH_PRIO_Q_ING_PREC:
3898 switch (qeth_get_ip_version(skb)) {
3899 case 4:
3900 tos = ipv4_get_dsfield(ip_hdr(skb));
3901 break;
3902 case 6:
3903 tos = ipv6_get_dsfield(ipv6_hdr(skb));
3904 break;
3905 default:
3906 return card->qdio.default_out_queue;
3907 }
3908 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3909 return ~tos >> 6 & 3;
3910 if (tos & IPTOS_MINCOST)
3911 return 3;
3912 if (tos & IPTOS_RELIABILITY)
3913 return 2;
3914 if (tos & IPTOS_THROUGHPUT)
3915 return 1;
3916 if (tos & IPTOS_LOWDELAY)
3917 return 0;
3918 break;
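	/* Lower skb->priority maps to a higher queue number: priorities 0-1
	 * select queue 3, 2-3 queue 2, 4-5 queue 1, and anything above 5
	 * queue 0.
	 */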
3919 case QETH_PRIO_Q_ING_SKB:
3920 if (skb->priority > 5)
3921 return 0;
3922 return ~skb->priority >> 1 & 3;
3923 case QETH_PRIO_Q_ING_VLAN:
3924 if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3925 return ~ntohs(veth->h_vlan_TCI) >>
3926 (VLAN_PRIO_SHIFT + 1) & 3;
3927 break;
3928 case QETH_PRIO_Q_ING_FIXED:
3929 return card->qdio.default_out_queue;
3930 default:
3931 break;
3932 }
3933 return card->qdio.default_out_queue;
3934}
3935EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3936
/**
 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
 * @skb: SKB address
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * the fragmented part of the SKB. Returns zero for a linear SKB.
 */
3944static int qeth_get_elements_for_frags(struct sk_buff *skb)
3945{
3946 int cnt, elements = 0;
3947
3948 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3949 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3950
3951 elements += qeth_get_elements_for_range(
3952 (addr_t)skb_frag_address(frag),
3953 (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3954 }
3955 return elements;
3956}
3957
3958/**
3959 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
3960 * to transmit an skb.
3961 * @skb: the skb to operate on.
3962 * @data_offset: skip this part of the skb's linear data
3963 *
3964 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3965 * skb's data (both its linear part and paged fragments).
3966 */
3967unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
3968{
3969 unsigned int elements = qeth_get_elements_for_frags(skb);
3970 addr_t end = (addr_t)skb->data + skb_headlen(skb);
3971 addr_t start = (addr_t)skb->data + data_offset;
3972
3973 if (start != end)
3974 elements += qeth_get_elements_for_range(start, end);
3975 return elements;
3976}
3977EXPORT_SYMBOL_GPL(qeth_count_elements);
3978
3979#define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
3980 MAX_TCP_HEADER)
3981
/**
 * qeth_add_hw_header() - add a HW header to an skb.
 * @queue: TX queue that the skb will be placed on.
 * @skb: skb that the HW header should be added to.
 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
 *	 it contains a valid pointer to a qeth_hdr.
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in same page as the
 *	       HW header.
 * @elements: returns the number of buffer elements needed for this skb.
 *
 * Returns the pushed length. If the header can't be pushed on
 * (eg. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
 * The number of needed buffer elements is returned in @elements.
 * A return value < 0 indicates that the HW header could not be created.
 */
3997static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
3998 struct sk_buff *skb, struct qeth_hdr **hdr,
3999 unsigned int hdr_len, unsigned int proto_len,
4000 unsigned int *elements)
4001{
4002 gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
4003 const unsigned int contiguous = proto_len ? proto_len : 1;
4004 const unsigned int max_elements = queue->max_elements;
4005 unsigned int __elements;
4006 addr_t start, end;
4007 bool push_ok;
4008 int rc;
4009
4010check_layout:
4011 start = (addr_t)skb->data - hdr_len;
4012 end = (addr_t)skb->data;
4013
4014 if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
4015 /* Push HW header into same page as first protocol header. */
4016 push_ok = true;
4017 /* ... but TSO always needs a separate element for headers: */
4018 if (skb_is_gso(skb))
4019 __elements = 1 + qeth_count_elements(skb, proto_len);
4020 else
4021 __elements = qeth_count_elements(skb, 0);
4022 } else if (!proto_len && PAGE_ALIGNED(skb->data)) {
4023 /* Push HW header into preceding page, flush with skb->data. */
4024 push_ok = true;
4025 __elements = 1 + qeth_count_elements(skb, 0);
4026 } else {
4027 /* Use header cache, copy protocol headers up. */
4028 push_ok = false;
4029 __elements = 1 + qeth_count_elements(skb, proto_len);
4030 }
4031
4032 /* Compress skb to fit into one IO buffer: */
4033 if (__elements > max_elements) {
4034 if (!skb_is_nonlinear(skb)) {
4035 /* Drop it, no easy way of shrinking it further. */
4036 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
4037 max_elements, __elements, skb->len);
4038 return -E2BIG;
4039 }
4040
4041 rc = skb_linearize(skb);
4042 if (rc) {
4043 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
4044 return rc;
4045 }
4046
4047 QETH_TXQ_STAT_INC(queue, skbs_linearized);
4048 /* Linearization changed the layout, re-evaluate: */
4049 goto check_layout;
4050 }
4051
4052 *elements = __elements;
4053 /* Add the header: */
4054 if (push_ok) {
4055 *hdr = skb_push(skb, hdr_len);
4056 return hdr_len;
4057 }
4058
4059 /* Fall back to cache element with known-good alignment: */
4060 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
4061 return -E2BIG;
4062 *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
4063 if (!*hdr)
4064 return -ENOMEM;
4065 /* Copy protocol headers behind HW header: */
4066 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
4067 return 0;
4068}
4069
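/* IQD TX bulking: multiple skbs may share one output buffer, but only if
 * they are headed to the same target. For L2 that means an identical
 * destination MAC and VLAN, for L3 an identical next hop and VLAN.
 */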
4070static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
4071 struct sk_buff *curr_skb,
4072 struct qeth_hdr *curr_hdr)
4073{
4074 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
4075 struct qeth_hdr *prev_hdr = queue->prev_hdr;
4076
4077 if (!prev_hdr)
4078 return true;
4079
4080 /* All packets must have the same target: */
4081 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
4082 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
4083
4084 return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
4085 eth_hdr(curr_skb)->h_dest) &&
4086 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
4087 }
4088
4089 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
4090 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
4091}
4092
/**
 * qeth_fill_buffer() - map skb into an output buffer
 * @buf: buffer to transport the skb
 * @skb: skb to map into the buffer
 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
 *	 from qeth_core_header_cache.
 * @offset: when mapping the skb, start at skb->data + offset
 * @hd_len: if > 0, build a dedicated header element of this size
 *
 * Returns the number of buffer elements that are in use after mapping.
 */
4102static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
4103 struct sk_buff *skb, struct qeth_hdr *hdr,
4104 unsigned int offset, unsigned int hd_len)
4105{
4106 struct qdio_buffer *buffer = buf->buffer;
4107 int element = buf->next_element_to_fill;
4108 int length = skb_headlen(skb) - offset;
4109 char *data = skb->data + offset;
4110 unsigned int elem_length, cnt;
4111 bool is_first_elem = true;
4112
4113 __skb_queue_tail(&buf->skb_list, skb);
4114
4115 /* build dedicated element for HW Header */
4116 if (hd_len) {
4117 is_first_elem = false;
4118
4119 buffer->element[element].addr = virt_to_phys(hdr);
4120 buffer->element[element].length = hd_len;
4121 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
4122
4123 /* HW header is allocated from cache: */
4124 if ((void *)hdr != skb->data)
4125 buf->is_header[element] = 1;
4126 /* HW header was pushed and is contiguous with linear part: */
4127 else if (length > 0 && !PAGE_ALIGNED(data) &&
4128 (data == (char *)hdr + hd_len))
4129 buffer->element[element].eflags |=
4130 SBAL_EFLAGS_CONTIGUOUS;
4131
4132 element++;
4133 }
4134
4135 /* map linear part into buffer element(s) */
4136 while (length > 0) {
4137 elem_length = min_t(unsigned int, length,
4138 PAGE_SIZE - offset_in_page(data));
4139
4140 buffer->element[element].addr = virt_to_phys(data);
4141 buffer->element[element].length = elem_length;
4142 length -= elem_length;
4143 if (is_first_elem) {
4144 is_first_elem = false;
4145 if (length || skb_is_nonlinear(skb))
4146 /* skb needs additional elements */
4147 buffer->element[element].eflags =
4148 SBAL_EFLAGS_FIRST_FRAG;
4149 else
4150 buffer->element[element].eflags = 0;
4151 } else {
4152 buffer->element[element].eflags =
4153 SBAL_EFLAGS_MIDDLE_FRAG;
4154 }
4155
4156 data += elem_length;
4157 element++;
4158 }
4159
4160 /* map page frags into buffer element(s) */
4161 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
4162 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
4163
4164 data = skb_frag_address(frag);
4165 length = skb_frag_size(frag);
4166 while (length > 0) {
4167 elem_length = min_t(unsigned int, length,
4168 PAGE_SIZE - offset_in_page(data));
4169
4170 buffer->element[element].addr = virt_to_phys(data);
4171 buffer->element[element].length = elem_length;
4172 buffer->element[element].eflags =
4173 SBAL_EFLAGS_MIDDLE_FRAG;
4174
4175 length -= elem_length;
4176 data += elem_length;
4177 element++;
4178 }
4179 }
4180
4181 if (buffer->element[element - 1].eflags)
4182 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
4183 buf->next_element_to_fill = element;
4184 return element;
4185}
4186
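/* Fill path for IQD devices: append the skb to the buffer at
 * bulk_start + bulk_count, closing (PRIMED) and flushing buffers whenever
 * bulking is not possible or a buffer runs out of elements. The txq is
 * stopped once all QDIO_MAX_BUFFERS_PER_Q buffers are in flight, and woken
 * again after the fill level drops.
 */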
4187static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4188 struct sk_buff *skb, unsigned int elements,
4189 struct qeth_hdr *hdr, unsigned int offset,
4190 unsigned int hd_len)
4191{
4192 unsigned int bytes = qdisc_pkt_len(skb);
4193 struct qeth_qdio_out_buffer *buffer;
4194 unsigned int next_element;
4195 struct netdev_queue *txq;
4196 bool stopped = false;
4197 bool flush;
4198
4199 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
4200 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4201
4202 /* Just a sanity check, the wake/stop logic should ensure that we always
4203 * get a free buffer.
4204 */
4205 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4206 return -EBUSY;
4207
4208 flush = !qeth_iqd_may_bulk(queue, skb, hdr);
4209
4210 if (flush ||
4211 (buffer->next_element_to_fill + elements > queue->max_elements)) {
4212 if (buffer->next_element_to_fill > 0) {
4213 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4214 queue->bulk_count++;
4215 }
4216
4217 if (queue->bulk_count >= queue->bulk_max)
4218 flush = true;
4219
4220 if (flush)
4221 qeth_flush_queue(queue);
4222
4223 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
4224 queue->bulk_count)];
4225
4226 /* Sanity-check again: */
4227 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4228 return -EBUSY;
4229 }
4230
4231 if (buffer->next_element_to_fill == 0 &&
4232 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4233 /* If a TX completion happens right _here_ and misses to wake
4234 * the txq, then our re-check below will catch the race.
4235 */
4236 QETH_TXQ_STAT_INC(queue, stopped);
4237 netif_tx_stop_queue(txq);
4238 stopped = true;
4239 }
4240
4241 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4242 buffer->bytes += bytes;
4243 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4244 queue->prev_hdr = hdr;
4245
4246 flush = __netdev_tx_sent_queue(txq, bytes,
4247 !stopped && netdev_xmit_more());
4248
4249 if (flush || next_element >= queue->max_elements) {
4250 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4251 queue->bulk_count++;
4252
4253 if (queue->bulk_count >= queue->bulk_max)
4254 flush = true;
4255
4256 if (flush)
4257 qeth_flush_queue(queue);
4258 }
4259
4260 if (stopped && !qeth_out_queue_is_full(queue))
4261 netif_tx_start_queue(txq);
4262 return 0;
4263}
4264
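/**
 * qeth_do_send_packet() - fill and flush buffers on a potentially packing
 *			   output queue (ie. for non-IQD devices).
 * @card: qeth_card that @queue belongs to.
 * @queue: output queue to transmit on.
 * @skb: skb to transmit.
 * @hdr: HW header for @skb, either pushed into its headroom or allocated
 *	 from qeth_core_header_cache.
 * @offset: when mapping the skb, start at skb->data + offset.
 * @hd_len: if > 0, build a dedicated header element of this size.
 * @elements_needed: number of buffer elements that @skb will occupy.
 *
 * Returns 0 on success, or a negative errno.
 */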
4265int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4266 struct sk_buff *skb, struct qeth_hdr *hdr,
4267 unsigned int offset, unsigned int hd_len,
4268 int elements_needed)
4269{
4270 struct qeth_qdio_out_buffer *buffer;
4271 unsigned int next_element;
4272 struct netdev_queue *txq;
4273 bool stopped = false;
4274 int start_index;
4275 int flush_count = 0;
4276 int do_pack = 0;
4277 int tmp;
4278 int rc = 0;
4279
4280 /* spin until we get the queue ... */
4281 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4282 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4283 start_index = queue->next_buf_to_fill;
4284 buffer = queue->bufs[queue->next_buf_to_fill];
4285
4286 /* Just a sanity check, the wake/stop logic should ensure that we always
4287 * get a free buffer.
4288 */
4289 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
4290 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4291 return -EBUSY;
4292 }
4293
4294 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4295
4296 /* check if we need to switch packing state of this queue */
4297 qeth_switch_to_packing_if_needed(queue);
4298 if (queue->do_pack) {
4299 do_pack = 1;
4300 /* does packet fit in current buffer? */
4301 if (buffer->next_element_to_fill + elements_needed >
4302 queue->max_elements) {
4303 /* ... no -> set state PRIMED */
4304 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4305 flush_count++;
4306 queue->next_buf_to_fill =
4307 QDIO_BUFNR(queue->next_buf_to_fill + 1);
4308 buffer = queue->bufs[queue->next_buf_to_fill];
4309
4310 /* We stepped forward, so sanity-check again: */
4311 if (atomic_read(&buffer->state) !=
4312 QETH_QDIO_BUF_EMPTY) {
4313 qeth_flush_buffers(queue, start_index,
4314 flush_count);
4315 atomic_set(&queue->state,
4316 QETH_OUT_Q_UNLOCKED);
4317 rc = -EBUSY;
4318 goto out;
4319 }
4320 }
4321 }
4322
4323 if (buffer->next_element_to_fill == 0 &&
4324 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4325 /* If a TX completion happens right _here_ and misses to wake
4326 * the txq, then our re-check below will catch the race.
4327 */
4328 QETH_TXQ_STAT_INC(queue, stopped);
4329 netif_tx_stop_queue(txq);
4330 stopped = true;
4331 }
4332
4333 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4334 buffer->bytes += qdisc_pkt_len(skb);
4335 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4336
4337 if (queue->do_pack)
4338 QETH_TXQ_STAT_INC(queue, skbs_pack);
4339 if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
4340 flush_count++;
4341 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4342 queue->next_buf_to_fill =
4343 QDIO_BUFNR(queue->next_buf_to_fill + 1);
4344 }
4345
4346 if (flush_count)
4347 qeth_flush_buffers(queue, start_index, flush_count);
4348 else if (!atomic_read(&queue->set_pci_flags_count))
4349 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
	/*
	 * queue->state will go from LOCKED -> UNLOCKED or from
	 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
	 * (switch packing state or flush buffer to get another pci flag out).
	 * In that case we will enter this loop.
	 */
4356 while (atomic_dec_return(&queue->state)) {
4357 start_index = queue->next_buf_to_fill;
4358 /* check if we can go back to non-packing state */
4359 tmp = qeth_switch_to_nonpacking_if_needed(queue);
4360 /*
4361 * check if we need to flush a packing buffer to get a pci
4362 * flag out on the queue
4363 */
4364 if (!tmp && !atomic_read(&queue->set_pci_flags_count))
4365 tmp = qeth_prep_flush_pack_buffer(queue);
4366 if (tmp) {
4367 qeth_flush_buffers(queue, start_index, tmp);
4368 flush_count += tmp;
4369 }
4370 }
4371out:
4372 /* at this point the queue is UNLOCKED again */
4373 if (do_pack)
4374 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
4375
4376 if (stopped && !qeth_out_queue_is_full(queue))
4377 netif_tx_start_queue(txq);
4378 return rc;
4379}
4380EXPORT_SYMBOL_GPL(qeth_do_send_packet);
4381
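/* Fill the TSO extension header that instructs the adapter how to segment
 * the skb: total payload length, the gso_size as MSS, and the length of the
 * protocol headers to replicate per segment.
 */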
4382static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4383 unsigned int payload_len, struct sk_buff *skb,
4384 unsigned int proto_len)
4385{
4386 struct qeth_hdr_ext_tso *ext = &hdr->ext;
4387
4388 ext->hdr_tot_len = sizeof(*ext);
4389 ext->imb_hdr_no = 1;
4390 ext->hdr_type = 1;
4391 ext->hdr_version = 1;
4392 ext->hdr_len = 28;
4393 ext->payload_len = payload_len;
4394 ext->mss = skb_shinfo(skb)->gso_size;
4395 ext->dg_hdr_len = proto_len;
4396}
4397
4398int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4399 struct qeth_qdio_out_q *queue, int ipv,
4400 void (*fill_header)(struct qeth_qdio_out_q *queue,
4401 struct qeth_hdr *hdr, struct sk_buff *skb,
4402 int ipv, unsigned int data_len))
4403{
4404 unsigned int proto_len, hw_hdr_len;
4405 unsigned int frame_len = skb->len;
4406 bool is_tso = skb_is_gso(skb);
4407 unsigned int data_offset = 0;
4408 struct qeth_hdr *hdr = NULL;
4409 unsigned int hd_len = 0;
4410 unsigned int elements;
4411 int push_len, rc;
4412
4413 if (is_tso) {
4414 hw_hdr_len = sizeof(struct qeth_hdr_tso);
4415 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4416 } else {
4417 hw_hdr_len = sizeof(struct qeth_hdr);
4418 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4419 }
4420
4421 rc = skb_cow_head(skb, hw_hdr_len);
4422 if (rc)
4423 return rc;
4424
4425 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4426 &elements);
4427 if (push_len < 0)
4428 return push_len;
4429 if (is_tso || !push_len) {
4430 /* HW header needs its own buffer element. */
4431 hd_len = hw_hdr_len + proto_len;
4432 data_offset = push_len + proto_len;
4433 }
4434 memset(hdr, 0, hw_hdr_len);
4435 fill_header(queue, hdr, skb, ipv, frame_len);
4436 if (is_tso)
4437 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4438 frame_len - proto_len, skb, proto_len);
4439
4440 if (IS_IQD(card)) {
4441 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
4442 hd_len);
4443 } else {
4444 /* TODO: drop skb_orphan() once TX completion is fast enough */
4445 skb_orphan(skb);
4446 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4447 hd_len, elements);
4448 }
4449
4450 if (rc && !push_len)
4451 kmem_cache_free(qeth_core_header_cache, hdr);
4452
4453 return rc;
4454}
4455EXPORT_SYMBOL_GPL(qeth_xmit);
4456
4457static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4458 struct qeth_reply *reply, unsigned long data)
4459{
4460 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4461 struct qeth_ipacmd_setadpparms *setparms;
4462
4463 QETH_CARD_TEXT(card, 4, "prmadpcb");
4464
4465 setparms = &(cmd->data.setadapterparms);
4466 if (qeth_setadpparms_inspect_rc(cmd)) {
4467 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4468 setparms->data.mode = SET_PROMISC_MODE_OFF;
4469 }
4470 card->info.promisc_mode = setparms->data.mode;
4471 return (cmd->hdr.return_code) ? -EIO : 0;
4472}
4473
4474void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
4475{
4476 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
4477 SET_PROMISC_MODE_OFF;
4478 struct qeth_cmd_buffer *iob;
4479 struct qeth_ipa_cmd *cmd;
4480
4481 QETH_CARD_TEXT(card, 4, "setprom");
4482 QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4483
4484 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4485 SETADP_DATA_SIZEOF(mode));
4486 if (!iob)
4487 return;
4488 cmd = __ipa_cmd(iob);
4489 cmd->data.setadapterparms.data.mode = mode;
4490 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4491}
4492EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4493
4494static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4495 struct qeth_reply *reply, unsigned long data)
4496{
4497 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4498 struct qeth_ipacmd_setadpparms *adp_cmd;
4499
4500 QETH_CARD_TEXT(card, 4, "chgmaccb");
4501 if (qeth_setadpparms_inspect_rc(cmd))
4502 return -EIO;
4503
4504 adp_cmd = &cmd->data.setadapterparms;
4505 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4506 return -EADDRNOTAVAIL;
4507
4508 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4509 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4510 return -EADDRNOTAVAIL;
4511
4512 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4513 return 0;
4514}
4515
4516int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4517{
4518 int rc;
4519 struct qeth_cmd_buffer *iob;
4520 struct qeth_ipa_cmd *cmd;
4521
4522 QETH_CARD_TEXT(card, 4, "chgmac");
4523
4524 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4525 SETADP_DATA_SIZEOF(change_addr));
4526 if (!iob)
4527 return -ENOMEM;
4528 cmd = __ipa_cmd(iob);
4529 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4530 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4531 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4532 card->dev->dev_addr);
4533 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4534 NULL);
4535 return rc;
4536}
4537EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4538
4539static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4540 struct qeth_reply *reply, unsigned long data)
4541{
4542 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4543 struct qeth_set_access_ctrl *access_ctrl_req;
4544 int fallback = *(int *)reply->param;
4545
4546 QETH_CARD_TEXT(card, 4, "setaccb");
4547 if (cmd->hdr.return_code)
4548 return -EIO;
4549 qeth_setadpparms_inspect_rc(cmd);
4550
4551 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4552 QETH_CARD_TEXT_(card, 2, "rc=%d",
4553 cmd->data.setadapterparms.hdr.return_code);
4554 if (cmd->data.setadapterparms.hdr.return_code !=
4555 SET_ACCESS_CTRL_RC_SUCCESS)
4556 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4557 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4558 cmd->data.setadapterparms.hdr.return_code);
4559 switch (cmd->data.setadapterparms.hdr.return_code) {
4560 case SET_ACCESS_CTRL_RC_SUCCESS:
4561 if (card->options.isolation == ISOLATION_MODE_NONE) {
4562 dev_info(&card->gdev->dev,
4563 "QDIO data connection isolation is deactivated\n");
4564 } else {
4565 dev_info(&card->gdev->dev,
4566 "QDIO data connection isolation is activated\n");
4567 }
4568 break;
4569 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4570 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4571 CARD_DEVID(card));
4572 if (fallback)
4573 card->options.isolation = card->options.prev_isolation;
4574 break;
4575 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4576 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4577 CARD_DEVID(card));
4578 if (fallback)
4579 card->options.isolation = card->options.prev_isolation;
4580 break;
4581 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4582 dev_err(&card->gdev->dev, "Adapter does not "
4583 "support QDIO data connection isolation\n");
4584 break;
4585 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4586 dev_err(&card->gdev->dev,
4587 "Adapter is dedicated. "
4588 "QDIO data connection isolation not supported\n");
4589 if (fallback)
4590 card->options.isolation = card->options.prev_isolation;
4591 break;
4592 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4593 dev_err(&card->gdev->dev,
4594 "TSO does not permit QDIO data connection isolation\n");
4595 if (fallback)
4596 card->options.isolation = card->options.prev_isolation;
4597 break;
4598 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4599 dev_err(&card->gdev->dev, "The adjacent switch port does not "
4600 "support reflective relay mode\n");
4601 if (fallback)
4602 card->options.isolation = card->options.prev_isolation;
4603 break;
4604 case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
			"enabled at the adjacent switch port\n");
4607 if (fallback)
4608 card->options.isolation = card->options.prev_isolation;
4609 break;
4610 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4611 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4612 "at the adjacent switch failed\n");
4613 break;
4614 default:
4615 /* this should never happen */
4616 if (fallback)
4617 card->options.isolation = card->options.prev_isolation;
4618 break;
4619 }
4620 return (cmd->hdr.return_code) ? -EIO : 0;
4621}
4622
4623static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4624 enum qeth_ipa_isolation_modes isolation, int fallback)
4625{
4626 int rc;
4627 struct qeth_cmd_buffer *iob;
4628 struct qeth_ipa_cmd *cmd;
4629 struct qeth_set_access_ctrl *access_ctrl_req;
4630
4631 QETH_CARD_TEXT(card, 4, "setacctl");
4632
4633 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4634 SETADP_DATA_SIZEOF(set_access_ctrl));
4635 if (!iob)
4636 return -ENOMEM;
4637 cmd = __ipa_cmd(iob);
4638 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4639 access_ctrl_req->subcmd_code = isolation;
4640
4641 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4642 &fallback);
4643 QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4644 return rc;
4645}
4646
4647int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
4648{
4649 int rc = 0;
4650
4651 QETH_CARD_TEXT(card, 4, "setactlo");
4652
4653 if ((IS_OSD(card) || IS_OSX(card)) &&
4654 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4655 rc = qeth_setadpparms_set_access_ctrl(card,
4656 card->options.isolation, fallback);
4657 if (rc) {
			QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL)(%d) on device %x: sending failed\n",
					 rc, CARD_DEVID(card));
4660 rc = -EOPNOTSUPP;
4661 }
4662 } else if (card->options.isolation != ISOLATION_MODE_NONE) {
4663 card->options.isolation = ISOLATION_MODE_NONE;
4664
4665 dev_err(&card->gdev->dev, "Adapter does not "
4666 "support QDIO data connection isolation\n");
4667 rc = -EOPNOTSUPP;
4668 }
4669 return rc;
4670}
4671
4672void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
4673{
4674 struct qeth_card *card;
4675
4676 card = dev->ml_priv;
4677 QETH_CARD_TEXT(card, 4, "txtimeo");
4678 qeth_schedule_recovery(card);
4679}
4680EXPORT_SYMBOL_GPL(qeth_tx_timeout);
4681
4682static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4683{
4684 struct qeth_card *card = dev->ml_priv;
4685 int rc = 0;
4686
4687 switch (regnum) {
4688 case MII_BMCR: /* Basic mode control register */
4689 rc = BMCR_FULLDPLX;
4690 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4691 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4692 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4693 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4694 rc |= BMCR_SPEED100;
4695 break;
4696 case MII_BMSR: /* Basic mode status register */
4697 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4698 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4699 BMSR_100BASE4;
4700 break;
4701 case MII_PHYSID1: /* PHYS ID 1 */
4702 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4703 dev->dev_addr[2];
4704 rc = (rc >> 5) & 0xFFFF;
4705 break;
4706 case MII_PHYSID2: /* PHYS ID 2 */
4707 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4708 break;
4709 case MII_ADVERTISE: /* Advertisement control reg */
4710 rc = ADVERTISE_ALL;
4711 break;
4712 case MII_LPA: /* Link partner ability reg */
4713 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4714 LPA_100BASE4 | LPA_LPACK;
4715 break;
4716 case MII_EXPANSION: /* Expansion register */
4717 break;
4718 case MII_DCOUNTER: /* disconnect counter */
4719 break;
4720 case MII_FCSCOUNTER: /* false carrier counter */
4721 break;
4722 case MII_NWAYTEST: /* N-way auto-neg test register */
4723 break;
4724 case MII_RERRCOUNTER: /* rx error counter */
4725 rc = card->stats.rx_length_errors +
4726 card->stats.rx_frame_errors +
4727 card->stats.rx_fifo_errors;
4728 break;
4729 case MII_SREVISION: /* silicon revision */
4730 break;
4731 case MII_RESV1: /* reserved 1 */
4732 break;
4733 case MII_LBRERROR: /* loopback, rx, bypass error */
4734 break;
4735 case MII_PHYADDR: /* physical address */
4736 break;
4737 case MII_RESV2: /* reserved 2 */
4738 break;
4739 case MII_TPISTATUS: /* TPI status for 10mbps */
4740 break;
4741 case MII_NCONFIG: /* network interface config */
4742 break;
4743 default:
4744 break;
4745 }
4746 return rc;
4747}
4748
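/* SNMP replies may arrive as a sequence of fragments; each invocation copies
 * one fragment into the user buffer. Returning 1 (while seq_no < used_total)
 * tells the IPA machinery to wait for the next fragment, returning 0
 * completes the command.
 */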
4749static int qeth_snmp_command_cb(struct qeth_card *card,
4750 struct qeth_reply *reply, unsigned long data)
4751{
4752 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4753 struct qeth_arp_query_info *qinfo = reply->param;
4754 struct qeth_ipacmd_setadpparms *adp_cmd;
4755 unsigned int data_len;
4756 void *snmp_data;
4757
4758 QETH_CARD_TEXT(card, 3, "snpcmdcb");
4759
4760 if (cmd->hdr.return_code) {
4761 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4762 return -EIO;
4763 }
4764 if (cmd->data.setadapterparms.hdr.return_code) {
4765 cmd->hdr.return_code =
4766 cmd->data.setadapterparms.hdr.return_code;
4767 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4768 return -EIO;
4769 }
4770
4771 adp_cmd = &cmd->data.setadapterparms;
4772 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
4773 if (adp_cmd->hdr.seq_no == 1) {
4774 snmp_data = &adp_cmd->data.snmp;
4775 } else {
4776 snmp_data = &adp_cmd->data.snmp.request;
4777 data_len -= offsetof(struct qeth_snmp_cmd, request);
4778 }
4779
4780 /* check if there is enough room in userspace */
4781 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4782 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
4783 return -ENOSPC;
4784 }
4785 QETH_CARD_TEXT_(card, 4, "snore%i",
4786 cmd->data.setadapterparms.hdr.used_total);
4787 QETH_CARD_TEXT_(card, 4, "sseqn%i",
4788 cmd->data.setadapterparms.hdr.seq_no);
4789 /*copy entries to user buffer*/
4790 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4791 qinfo->udata_offset += data_len;
4792
4793 if (cmd->data.setadapterparms.hdr.seq_no <
4794 cmd->data.setadapterparms.hdr.used_total)
4795 return 1;
4796 return 0;
4797}
4798
4799static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4800{
4801 struct qeth_snmp_ureq __user *ureq;
4802 struct qeth_cmd_buffer *iob;
4803 unsigned int req_len;
4804 struct qeth_arp_query_info qinfo = {0, };
4805 int rc = 0;
4806
4807 QETH_CARD_TEXT(card, 3, "snmpcmd");
4808
4809 if (IS_VM_NIC(card))
4810 return -EOPNOTSUPP;
4811
4812 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4813 IS_LAYER3(card))
4814 return -EOPNOTSUPP;
4815
4816 ureq = (struct qeth_snmp_ureq __user *) udata;
4817 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
4818 get_user(req_len, &ureq->hdr.req_len))
4819 return -EFAULT;
4820
4821 /* Sanitize user input, to avoid overflows in iob size calculation: */
4822 if (req_len > QETH_BUFSIZE)
4823 return -EINVAL;
4824
4825 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4826 if (!iob)
4827 return -ENOMEM;
4828
4829 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
4830 &ureq->cmd, req_len)) {
4831 qeth_put_cmd(iob);
4832 return -EFAULT;
4833 }
4834
4835 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4836 if (!qinfo.udata) {
4837 qeth_put_cmd(iob);
4838 return -ENOMEM;
4839 }
4840 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4841
4842 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
4843 if (rc)
4844 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4845 CARD_DEVID(card), rc);
4846 else {
4847 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4848 rc = -EFAULT;
4849 }
4850
4851 kfree(qinfo.udata);
4852 return rc;
4853}
4854
4855static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4856 struct qeth_reply *reply, unsigned long data)
4857{
4858 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4859 struct qeth_qoat_priv *priv;
4860 char *resdata;
4861 int resdatalen;
4862
4863 QETH_CARD_TEXT(card, 3, "qoatcb");
4864 if (qeth_setadpparms_inspect_rc(cmd))
4865 return -EIO;
4866
4867 priv = (struct qeth_qoat_priv *)reply->param;
4868 resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
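	/* The OAT payload starts behind the IPA/SETADP command headers;
	 * 28 is the historical magic offset for that preamble.
	 */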
4869 resdata = (char *)data + 28;
4870
4871 if (resdatalen > (priv->buffer_len - priv->response_len))
4872 return -ENOSPC;
4873
4874 memcpy((priv->buffer + priv->response_len), resdata,
4875 resdatalen);
4876 priv->response_len += resdatalen;
4877
4878 if (cmd->data.setadapterparms.hdr.seq_no <
4879 cmd->data.setadapterparms.hdr.used_total)
4880 return 1;
4881 return 0;
4882}
4883
4884static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4885{
4886 int rc = 0;
4887 struct qeth_cmd_buffer *iob;
4888 struct qeth_ipa_cmd *cmd;
4889 struct qeth_query_oat *oat_req;
4890 struct qeth_query_oat_data oat_data;
4891 struct qeth_qoat_priv priv;
4892 void __user *tmp;
4893
4894 QETH_CARD_TEXT(card, 3, "qoatcmd");
4895
4896 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
4897 rc = -EOPNOTSUPP;
4898 goto out;
4899 }
4900
4901 if (copy_from_user(&oat_data, udata,
4902 sizeof(struct qeth_query_oat_data))) {
4903 rc = -EFAULT;
4904 goto out;
4905 }
4906
4907 priv.buffer_len = oat_data.buffer_len;
4908 priv.response_len = 0;
4909 priv.buffer = vzalloc(oat_data.buffer_len);
4910 if (!priv.buffer) {
4911 rc = -ENOMEM;
4912 goto out;
4913 }
4914
4915 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4916 SETADP_DATA_SIZEOF(query_oat));
4917 if (!iob) {
4918 rc = -ENOMEM;
4919 goto out_free;
4920 }
4921 cmd = __ipa_cmd(iob);
4922 oat_req = &cmd->data.setadapterparms.data.query_oat;
4923 oat_req->subcmd_code = oat_data.command;
4924
4925 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
4926 &priv);
4927 if (!rc) {
4928 if (is_compat_task())
4929 tmp = compat_ptr(oat_data.ptr);
4930 else
4931 tmp = (void __user *)(unsigned long)oat_data.ptr;
4932
4933 if (copy_to_user(tmp, priv.buffer,
4934 priv.response_len)) {
4935 rc = -EFAULT;
4936 goto out_free;
4937 }
4938
4939 oat_data.response_len = priv.response_len;
4940
4941 if (copy_to_user(udata, &oat_data,
4942 sizeof(struct qeth_query_oat_data)))
4943 rc = -EFAULT;
4944 }
4945
4946out_free:
4947 vfree(priv.buffer);
4948out:
4949 return rc;
4950}
4951
4952static int qeth_query_card_info_cb(struct qeth_card *card,
4953 struct qeth_reply *reply, unsigned long data)
4954{
4955 struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
4956 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4957 struct qeth_query_card_info *card_info;
4958
4959 QETH_CARD_TEXT(card, 2, "qcrdincb");
4960 if (qeth_setadpparms_inspect_rc(cmd))
4961 return -EIO;
4962
4963 card_info = &cmd->data.setadapterparms.data.card_info;
4964 carrier_info->card_type = card_info->card_type;
4965 carrier_info->port_mode = card_info->port_mode;
4966 carrier_info->port_speed = card_info->port_speed;
4967 return 0;
4968}
4969
4970int qeth_query_card_info(struct qeth_card *card,
4971 struct carrier_info *carrier_info)
4972{
4973 struct qeth_cmd_buffer *iob;
4974
4975 QETH_CARD_TEXT(card, 2, "qcrdinfo");
4976 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4977 return -EOPNOTSUPP;
4978 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
4979 if (!iob)
4980 return -ENOMEM;
4981 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
4982 (void *)carrier_info);
4983}
4984
/**
 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
 * @card: pointer to a qeth_card
 *
 * Returns
 *	0, if a MAC address has been set for the card's netdevice
 *	a negative errno, for various error conditions
 */
4993int qeth_vm_request_mac(struct qeth_card *card)
4994{
4995 struct diag26c_mac_resp *response;
4996 struct diag26c_mac_req *request;
4997 struct ccw_dev_id id;
4998 int rc;
4999
5000 QETH_CARD_TEXT(card, 2, "vmreqmac");
5001
5002 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
5003 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
5004 if (!request || !response) {
5005 rc = -ENOMEM;
5006 goto out;
5007 }
5008
5009 ccw_device_get_id(CARD_DDEV(card), &id);
5010 request->resp_buf_len = sizeof(*response);
5011 request->resp_version = DIAG26C_VERSION2;
5012 request->op_code = DIAG26C_GET_MAC;
5013 request->devno = id.devno;
5014
5015 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
5016 rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
5017 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
5018 if (rc)
5019 goto out;
5020 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
5021
5022 if (request->resp_buf_len < sizeof(*response) ||
5023 response->version != request->resp_version) {
5024 rc = -EIO;
5025 QETH_CARD_TEXT(card, 2, "badresp");
5026 QETH_CARD_HEX(card, 2, &request->resp_buf_len,
5027 sizeof(request->resp_buf_len));
5028 } else if (!is_valid_ether_addr(response->mac)) {
5029 rc = -EINVAL;
5030 QETH_CARD_TEXT(card, 2, "badmac");
5031 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
5032 } else {
5033 ether_addr_copy(card->dev->dev_addr, response->mac);
5034 }
5035
5036out:
5037 kfree(response);
5038 kfree(request);
5039 return rc;
5040}
5041EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
5042
5043static void qeth_determine_capabilities(struct qeth_card *card)
5044{
5045 struct qeth_channel *channel = &card->data;
5046 struct ccw_device *ddev = channel->ccwdev;
5047 int rc;
5048 int ddev_offline = 0;
5049
5050 QETH_CARD_TEXT(card, 2, "detcapab");
5051 if (!ddev->online) {
5052 ddev_offline = 1;
5053 rc = qeth_start_channel(channel);
5054 if (rc) {
5055 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5056 goto out;
5057 }
5058 }
5059
5060 rc = qeth_read_conf_data(card);
5061 if (rc) {
5062 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
5063 CARD_DEVID(card), rc);
5064 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5065 goto out_offline;
5066 }
5067
5068 rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
5069 if (rc)
5070 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5071
5072 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
5073 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
5074 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
5075 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
5076 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
	if (card->ssqd.qfmt == QDIO_IQDIO_QFMT &&
	    (card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) &&
	    (card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE)) {
		dev_info(&card->gdev->dev,
			 "Completion Queueing supported\n");
	} else {
		card->options.cq = QETH_CQ_NOTAVAILABLE;
	}

5087out_offline:
5088 if (ddev_offline == 1)
5089 qeth_stop_channel(channel);
5090out:
5091 return;
5092}
5093
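/* Establish the QDIO queues: build the QIB parameter field, collect the
 * SBAL pointers of all input/output queues, and hand everything to the qdio
 * layer via qdio_allocate() + qdio_establish(). The qdio.state atomics make
 * sure this happens at most once per ALLOCATED -> ESTABLISHED transition.
 */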
5094static int qeth_qdio_establish(struct qeth_card *card)
5095{
5096 struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
5097 struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
5098 struct qdio_initialize init_data;
5099 char *qib_param_field;
5100 unsigned int i;
5101 int rc = 0;
5102
5103 QETH_CARD_TEXT(card, 2, "qdioest");
5104
5105 qib_param_field = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
5106 if (!qib_param_field) {
5107 rc = -ENOMEM;
5108 goto out_free_nothing;
5109 }
5110
5111 qeth_create_qib_param_field(card, qib_param_field);
5112 qeth_create_qib_param_field_blkt(card, qib_param_field);
5113
5114 in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
5115 if (card->options.cq == QETH_CQ_ENABLED)
5116 in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
5117
5118 for (i = 0; i < card->qdio.no_out_queues; i++)
5119 out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
5120
5121 memset(&init_data, 0, sizeof(struct qdio_initialize));
5122 init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
5123 QDIO_QETH_QFMT;
5124 init_data.qib_param_field_format = 0;
5125 init_data.qib_param_field = qib_param_field;
5126 init_data.no_input_qs = card->qdio.no_in_queues;
5127 init_data.no_output_qs = card->qdio.no_out_queues;
5128 init_data.input_handler = qeth_qdio_input_handler;
5129 init_data.output_handler = qeth_qdio_output_handler;
5130 init_data.irq_poll = qeth_qdio_poll;
5131 init_data.int_parm = (unsigned long) card;
5132 init_data.input_sbal_addr_array = in_sbal_ptrs;
5133 init_data.output_sbal_addr_array = out_sbal_ptrs;
5134 init_data.output_sbal_state_array = card->qdio.out_bufstates;
5135 init_data.scan_threshold = IS_IQD(card) ? 0 : 32;
5136
5137 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
5138 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
5139 rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
5140 init_data.no_output_qs);
5141 if (rc) {
5142 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5143 goto out;
5144 }
5145 rc = qdio_establish(CARD_DDEV(card), &init_data);
5146 if (rc) {
5147 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5148 qdio_free(CARD_DDEV(card));
5149 }
5150 }
5151
5152 switch (card->options.cq) {
	case QETH_CQ_ENABLED:
		dev_info(&card->gdev->dev, "Completion Queue support enabled\n");
		break;
	case QETH_CQ_DISABLED:
		dev_info(&card->gdev->dev, "Completion Queue support disabled\n");
		break;
5159 default:
5160 break;
5161 }
5162out:
5163 kfree(qib_param_field);
5164out_free_nothing:
5165 return rc;
5166}
5167
5168static void qeth_core_free_card(struct qeth_card *card)
5169{
5170 QETH_CARD_TEXT(card, 2, "freecrd");
5171
5172 unregister_service_level(&card->qeth_service_level);
5173 debugfs_remove_recursive(card->debugfs);
5174 qeth_put_cmd(card->read_cmd);
5175 destroy_workqueue(card->event_wq);
5176 dev_set_drvdata(&card->gdev->dev, NULL);
5177 kfree(card);
5178}
5179
5180void qeth_trace_features(struct qeth_card *card)
5181{
5182 QETH_CARD_TEXT(card, 2, "features");
5183 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
5184 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
5185 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
5186 QETH_CARD_HEX(card, 2, &card->info.diagass_support,
5187 sizeof(card->info.diagass_support));
5188}
5189EXPORT_SYMBOL_GPL(qeth_trace_features);
5190
5191static struct ccw_device_id qeth_ids[] = {
5192 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
5193 .driver_info = QETH_CARD_TYPE_OSD},
5194 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
5195 .driver_info = QETH_CARD_TYPE_IQD},
5196#ifdef CONFIG_QETH_OSN
5197 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
5198 .driver_info = QETH_CARD_TYPE_OSN},
5199#endif
5200 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
5201 .driver_info = QETH_CARD_TYPE_OSM},
5202#ifdef CONFIG_QETH_OSX
5203 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
5204 .driver_info = QETH_CARD_TYPE_OSX},
5205#endif
5206 {},
5207};
5208MODULE_DEVICE_TABLE(ccw, qeth_ids);
5209
5210static struct ccw_driver qeth_ccw_driver = {
5211 .driver = {
5212 .owner = THIS_MODULE,
5213 .name = "qeth",
5214 },
5215 .ids = qeth_ids,
5216 .probe = ccwgroup_probe_ccwdev,
5217 .remove = ccwgroup_remove_ccwdev,
5218};
5219
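/**
 * qeth_core_hardsetup_card() - perform the HW-level setup of a card.
 * @card: card to set up.
 * @carrier_ok: set to true if the LAN is online, false otherwise.
 *
 * (Re-)starts all three channels, runs the IDX activation handshake (with up
 * to three retries), initializes MPC, queries IP assists and adapter
 * parameters, and sets up the QDIO queues.
 *
 * Returns 0 on success, or a negative errno.
 */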
5220int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
5221{
5222 int retries = 3;
5223 int rc;
5224
5225 QETH_CARD_TEXT(card, 2, "hrdsetup");
5226 atomic_set(&card->force_alloc_skb, 0);
5227 rc = qeth_update_from_chp_desc(card);
5228 if (rc)
5229 return rc;
5230retry:
5231 if (retries < 3)
5232 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
5233 CARD_DEVID(card));
5234 rc = qeth_qdio_clear_card(card, !IS_IQD(card));
5235 qeth_stop_channel(&card->data);
5236 qeth_stop_channel(&card->write);
5237 qeth_stop_channel(&card->read);
5238 qdio_free(CARD_DDEV(card));
5239
5240 rc = qeth_start_channel(&card->read);
5241 if (rc)
5242 goto retriable;
5243 rc = qeth_start_channel(&card->write);
5244 if (rc)
5245 goto retriable;
5246 rc = qeth_start_channel(&card->data);
5247 if (rc)
5248 goto retriable;
5249retriable:
5250 if (rc == -ERESTARTSYS) {
5251 QETH_CARD_TEXT(card, 2, "break1");
5252 return rc;
5253 } else if (rc) {
5254 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5255 if (--retries < 0)
5256 goto out;
5257 else
5258 goto retry;
5259 }
5260
5261 qeth_determine_capabilities(card);
5262 qeth_idx_init(card);
5263
5264 rc = qeth_idx_activate_read_channel(card);
5265 if (rc == -EINTR) {
5266 QETH_CARD_TEXT(card, 2, "break2");
5267 return rc;
5268 } else if (rc) {
5269 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5270 if (--retries < 0)
5271 goto out;
5272 else
5273 goto retry;
5274 }
5275
5276 rc = qeth_idx_activate_write_channel(card);
5277 if (rc == -EINTR) {
5278 QETH_CARD_TEXT(card, 2, "break3");
5279 return rc;
5280 } else if (rc) {
5281 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
5282 if (--retries < 0)
5283 goto out;
5284 else
5285 goto retry;
5286 }
5287 card->read_or_write_problem = 0;
5288 rc = qeth_mpc_initialize(card);
5289 if (rc) {
5290 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5291 goto out;
5292 }
5293
5294 rc = qeth_send_startlan(card);
5295 if (rc) {
5296 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5297 if (rc == -ENETDOWN) {
5298 dev_warn(&card->gdev->dev, "The LAN is offline\n");
5299 *carrier_ok = false;
5300 } else {
5301 goto out;
5302 }
5303 } else {
5304 *carrier_ok = true;
5305 }
5306
5307 card->options.ipa4.supported = 0;
5308 card->options.ipa6.supported = 0;
5309 card->options.adp.supported = 0;
5310 card->options.sbp.supported_funcs = 0;
5311 card->info.diagass_support = 0;
5312 rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5313 if (rc == -ENOMEM)
5314 goto out;
5315 if (qeth_is_supported(card, IPA_IPV6)) {
5316 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5317 if (rc == -ENOMEM)
5318 goto out;
5319 }
5320 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5321 rc = qeth_query_setadapterparms(card);
5322 if (rc < 0) {
5323 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
5324 goto out;
5325 }
5326 }
5327 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5328 rc = qeth_query_setdiagass(card);
5329 if (rc)
5330 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
5331 }
5332
5333 if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
5334 (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
5335 card->info.hwtrap = 0;
5336
5337 rc = qeth_set_access_ctrl_online(card, 0);
5338 if (rc)
5339 goto out;
5340
5341 rc = qeth_init_qdio_queues(card);
5342 if (rc) {
5343 QETH_CARD_TEXT_(card, 2, "9err%d", rc);
5344 goto out;
5345 }
5346
5347 return 0;
5348out:
5349 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5350 "an error on the device\n");
5351 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5352 CARD_DEVID(card), rc);
5353 return rc;
5354}
5355EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
5356
5357static int qeth_set_online(struct qeth_card *card)
5358{
5359 int rc;
5360
5361 mutex_lock(&card->discipline_mutex);
5362 mutex_lock(&card->conf_mutex);
5363 QETH_CARD_TEXT(card, 2, "setonlin");
5364
5365 rc = card->discipline->set_online(card);
5366
5367 mutex_unlock(&card->conf_mutex);
5368 mutex_unlock(&card->discipline_mutex);
5369
5370 return rc;
5371}
5372
5373int qeth_set_offline(struct qeth_card *card, bool resetting)
5374{
5375 int rc, rc2, rc3;
5376
5377 mutex_lock(&card->discipline_mutex);
5378 mutex_lock(&card->conf_mutex);
5379 QETH_CARD_TEXT(card, 3, "setoffl");
5380
5381 if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
5382 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5383 card->info.hwtrap = 1;
5384 }
5385
5386 rtnl_lock();
5387 card->info.open_when_online = card->dev->flags & IFF_UP;
5388 dev_close(card->dev);
5389 netif_device_detach(card->dev);
5390 netif_carrier_off(card->dev);
5391 rtnl_unlock();
5392
5393 card->discipline->set_offline(card);
5394
5395 rc = qeth_stop_channel(&card->data);
5396 rc2 = qeth_stop_channel(&card->write);
5397 rc3 = qeth_stop_channel(&card->read);
5398 if (!rc)
5399 rc = (rc2) ? rc2 : rc3;
5400 if (rc)
5401 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5402 qdio_free(CARD_DDEV(card));
5403
5404 /* let user_space know that device is offline */
5405 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5406
5407 mutex_unlock(&card->conf_mutex);
5408 mutex_unlock(&card->discipline_mutex);
5409 return 0;
5410}
5411EXPORT_SYMBOL_GPL(qeth_set_offline);
5412
5413static int qeth_do_reset(void *data)
5414{
5415 struct qeth_card *card = data;
5416 int rc;
5417
5418 QETH_CARD_TEXT(card, 2, "recover1");
5419 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
5420 return 0;
5421 QETH_CARD_TEXT(card, 2, "recover2");
5422 dev_warn(&card->gdev->dev,
5423 "A recovery process has been started for the device\n");
5424
5425 qeth_set_offline(card, true);
5426 rc = qeth_set_online(card);
5427 if (!rc) {
5428 dev_info(&card->gdev->dev,
5429 "Device successfully recovered!\n");
5430 } else {
5431 ccwgroup_set_offline(card->gdev);
5432 dev_warn(&card->gdev->dev,
5433 "The qeth device driver failed to recover an error on the device\n");
5434 }
5435 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
5436 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
5437 return 0;
5438}
5439
5440#if IS_ENABLED(CONFIG_QETH_L3)
5441static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
5442 struct qeth_hdr *hdr)
5443{
5444 struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
5445 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
5446 struct net_device *dev = skb->dev;
5447
5448 if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
5449 dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
5450 "FAKELL", skb->len);
5451 return;
5452 }
5453
5454 if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
5455 u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
5456 ETH_P_IP;
5457 unsigned char tg_addr[ETH_ALEN];
5458
5459 skb_reset_network_header(skb);
5460 switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
5461 case QETH_CAST_MULTICAST:
5462 if (prot == ETH_P_IP)
5463 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
5464 else
5465 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
5466 QETH_CARD_STAT_INC(card, rx_multicast);
5467 break;
5468 case QETH_CAST_BROADCAST:
5469 ether_addr_copy(tg_addr, dev->broadcast);
5470 QETH_CARD_STAT_INC(card, rx_multicast);
5471 break;
5472 default:
5473 if (card->options.sniffer)
5474 skb->pkt_type = PACKET_OTHERHOST;
5475 ether_addr_copy(tg_addr, dev->dev_addr);
5476 }
5477
5478 if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
5479 dev_hard_header(skb, dev, prot, tg_addr,
5480 &l3_hdr->next_hop.rx.src_mac, skb->len);
5481 else
5482 dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
5483 skb->len);
5484 }
5485
5486 /* copy VLAN tag from hdr into skb */
5487 if (!card->options.sniffer &&
5488 (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
5489 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
5490 u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
5491 l3_hdr->vlan_id :
5492 l3_hdr->next_hop.rx.vlan_id;
5493
5494 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
5495 }
5496}
5497#endif
5498
5499static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
5500 struct qeth_hdr *hdr, bool uses_frags)
5501{
5502 struct napi_struct *napi = &card->napi;
5503 bool is_cso;
5504
5505 switch (hdr->hdr.l2.id) {
5506 case QETH_HEADER_TYPE_OSN:
5507 skb_push(skb, sizeof(*hdr));
5508 skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
5509 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5510 QETH_CARD_STAT_INC(card, rx_packets);
5511
5512 card->osn_info.data_cb(skb);
5513 return;
5514#if IS_ENABLED(CONFIG_QETH_L3)
5515 case QETH_HEADER_TYPE_LAYER3:
5516 qeth_l3_rebuild_skb(card, skb, hdr);
5517 is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5518 break;
5519#endif
5520 case QETH_HEADER_TYPE_LAYER2:
5521 is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5522 break;
5523 default:
5524 /* never happens */
5525 if (uses_frags)
5526 napi_free_frags(napi);
5527 else
5528 dev_kfree_skb_any(skb);
5529 return;
5530 }
5531
5532 if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
5533 skb->ip_summed = CHECKSUM_UNNECESSARY;
5534 QETH_CARD_STAT_INC(card, rx_skb_csum);
5535 } else {
5536 skb->ip_summed = CHECKSUM_NONE;
5537 }
5538
5539 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5540 QETH_CARD_STAT_INC(card, rx_packets);
5541 if (skb_is_nonlinear(skb)) {
5542 QETH_CARD_STAT_INC(card, rx_sg_skbs);
5543 QETH_CARD_STAT_ADD(card, rx_sg_frags,
5544 skb_shinfo(skb)->nr_frags);
5545 }
5546
5547 if (uses_frags) {
5548 napi_gro_frags(napi);
5549 } else {
5550 skb->protocol = eth_type_trans(skb, skb->dev);
5551 napi_gro_receive(napi, skb);
5552 }
5553}
5554
5555static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
5556{
5557 struct page *page = virt_to_page(data);
5558 unsigned int next_frag;
5559
5560 next_frag = skb_shinfo(skb)->nr_frags;
5561 get_page(page);
5562 skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
5563 data_len);
5564}
5565
5566static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
5567{
5568 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
5569}
5570
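/* Extract one packet from the current RX buffer, starting at *element_no /
 * *__offset. Small packets (up to rx_copybreak) are copied into a linear
 * skb; larger ones are mapped page-by-page into napi frags. Both cursors
 * are advanced so that the next call continues behind this packet.
 */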
static int qeth_extract_skb(struct qeth_card *card,
			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
			    int *__offset)
{
	struct qeth_priv *priv = netdev_priv(card->dev);
	struct qdio_buffer *buffer = qethbuffer->buffer;
	struct napi_struct *napi = &card->napi;
	struct qdio_buffer_element *element;
	unsigned int linear_len = 0;
	bool uses_frags = false;
	int offset = *__offset;
	bool use_rx_sg = false;
	unsigned int headroom;
	struct qeth_hdr *hdr;
	struct sk_buff *skb;
	int skb_len = 0;

	element = &buffer->element[*element_no];

next_packet:
	/* qeth_hdr must not cross element boundaries */
	while (element->length < offset + sizeof(struct qeth_hdr)) {
		if (qeth_is_last_sbale(element))
			return -ENODATA;
		element++;
		offset = 0;
	}

	hdr = phys_to_virt(element->addr) + offset;
	offset += sizeof(*hdr);
	skb = NULL;

	switch (hdr->hdr.l2.id) {
	case QETH_HEADER_TYPE_LAYER2:
		skb_len = hdr->hdr.l2.pkt_length;
		linear_len = ETH_HLEN;
		headroom = 0;
		break;
	case QETH_HEADER_TYPE_LAYER3:
		skb_len = hdr->hdr.l3.length;
		if (!IS_LAYER3(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
			linear_len = ETH_HLEN;
			headroom = 0;
			break;
		}

		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
			linear_len = sizeof(struct ipv6hdr);
		else
			linear_len = sizeof(struct iphdr);
		headroom = ETH_HLEN;
		break;
	case QETH_HEADER_TYPE_OSN:
		skb_len = hdr->hdr.osn.pdu_length;
		if (!IS_OSN(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

		linear_len = skb_len;
		headroom = sizeof(struct qeth_hdr);
		break;
	default:
		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
			QETH_CARD_STAT_INC(card, rx_frame_errors);
		else
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);

		/* Can't determine packet length, drop the whole buffer. */
		return -EPROTONOSUPPORT;
	}

	if (skb_len < linear_len) {
		QETH_CARD_STAT_INC(card, rx_dropped_runt);
		goto walk_packet;
	}

	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
		     !atomic_read(&card->force_alloc_skb) &&
		     !IS_OSN(card));

	if (use_rx_sg) {
		/* QETH_CQ_ENABLED only: */
		if (qethbuffer->rx_skb &&
		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
			skb = qethbuffer->rx_skb;
			qethbuffer->rx_skb = NULL;
			goto use_skb;
		}

		skb = napi_get_frags(napi);
		if (!skb) {
			/* -ENOMEM, no point in falling back further. */
			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
			goto walk_packet;
		}

		if (skb_tailroom(skb) >= linear_len + headroom) {
			uses_frags = true;
			goto use_skb;
		}

		netdev_info_once(card->dev,
				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
				 linear_len + headroom, skb_tailroom(skb));
		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
	}

	linear_len = skb_len;
	skb = napi_alloc_skb(napi, linear_len + headroom);
	if (!skb) {
		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
		goto walk_packet;
	}

use_skb:
	if (headroom)
		skb_reserve(skb, headroom);
walk_packet:
	while (skb_len) {
		int data_len = min(skb_len, (int)(element->length - offset));
		char *data = phys_to_virt(element->addr) + offset;

		skb_len -= data_len;
		offset += data_len;

		/* Extract data from current element: */
		if (skb && data_len) {
			if (linear_len) {
				unsigned int copy_len;

				copy_len = min_t(unsigned int, linear_len,
						 data_len);

				skb_put_data(skb, data, copy_len);
				linear_len -= copy_len;
				data_len -= copy_len;
				data += copy_len;
			}

			if (data_len)
				qeth_create_skb_frag(skb, data, data_len);
		}

		/* Step forward to next element: */
		if (skb_len) {
			if (qeth_is_last_sbale(element)) {
				QETH_CARD_TEXT(card, 4, "unexeob");
				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
				if (skb) {
					if (uses_frags)
						napi_free_frags(napi);
					else
						dev_kfree_skb_any(skb);
					QETH_CARD_STAT_INC(card,
							   rx_length_errors);
				}
				return -EMSGSIZE;
			}
			element++;
			offset = 0;
		}
	}

	/* This packet was skipped, go get another one: */
	if (!skb)
		goto next_packet;

	*element_no = element - &buffer->element[0];
	*__offset = offset;

	qeth_receive_skb(card, skb, hdr, uses_frags);
	return 0;
}

static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
				      struct qeth_qdio_buffer *buf, bool *done)
{
	unsigned int work_done = 0;

	while (budget) {
		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
				     &card->rx.e_offset)) {
			*done = true;
			break;
		}

		work_done++;
		budget--;
	}

	return work_done;
}

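/*
 * RX side of the NAPI poll loop: fetch completed inbound buffers from
 * QDIO, extract the packets they contain (up to @budget), and return
 * each fully processed buffer to the device. Per-buffer parse state
 * lives in card->rx, so a buffer interrupted by an exhausted budget is
 * resumed on the next poll.
 */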
static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
{
	unsigned int work_done = 0;

	while (budget > 0) {
		struct qeth_qdio_buffer *buffer;
		unsigned int skbs_done = 0;
		bool done = false;

		/* Fetch completed RX buffers: */
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
			card->rx.b_count = qdio_get_next_buffers(
				card->data.ccwdev, 0, &card->rx.b_index,
				&card->rx.qdio_err);
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
		}

		/* Process one completed RX buffer: */
		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
		if (!(card->rx.qdio_err &&
		      qeth_check_qdio_errors(card, buffer->buffer,
					     card->rx.qdio_err, "qinerr")))
			skbs_done = qeth_extract_skbs(card, budget, buffer,
						      &done);
		else
			done = true;

		work_done += skbs_done;
		budget -= skbs_done;

		if (done) {
			QETH_CARD_STAT_INC(card, rx_bufs);
			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
			qeth_queue_input_buffer(card, card->rx.b_index);
			card->rx.b_count--;

			/* Step forward to next buffer: */
			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
			card->rx.buf_element = 0;
			card->rx.e_offset = 0;
		}
	}

	return work_done;
}

static void qeth_cq_poll(struct qeth_card *card)
{
	unsigned int work_done = 0;

	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
		unsigned int start, error;
		int completed;

		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
					       &error);
		if (completed <= 0)
			return;

		qeth_qdio_cq_handler(card, error, 1, start, completed);
		work_done += completed;
	}
}

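/*
 * Main NAPI handler: poll RX (and the completion queue, if enabled).
 * When the budget is not exhausted, complete NAPI and re-enable the
 * QDIO interrupt; a non-zero return from qdio_start_irq() indicates
 * that more work arrived in the meantime, so polling is rescheduled
 * right away.
 */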
int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	unsigned int work_done;

	work_done = qeth_rx_poll(card, budget);

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_cq_poll(card);

	/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
	if (budget && work_done >= budget)
		return work_done;

	if (napi_complete_done(napi, work_done) &&
	    qdio_start_irq(CARD_DDEV(card)))
		napi_schedule(napi);

	return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);

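/*
 * Completion handling for one IQD TX buffer. A buffer flagged as
 * QDIO_OUTBUF_STATE_FLAG_PENDING was accepted by the hardware but is
 * not yet delivered; its skbs get a TX_NOTIFY_PENDING notification and
 * the queue slot is re-armed with a fresh buffer, while the old one
 * completes asynchronously via the completion queue. Fully completed
 * buffers are simply cleared for re-use.
 */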
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
				 unsigned int bidx, bool error, int budget)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;

	if (queue->bufstates && (queue->bufstates[bidx].flags &
				 QDIO_OUTBUF_STATE_FLAG_PENDING)) {
		WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);

		if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
				   QETH_QDIO_BUF_PENDING) ==
		    QETH_QDIO_BUF_PRIMED)
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

		/* prepare the queue slot for re-use: */
		qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
		if (qeth_init_qdio_out_buf(queue, bidx)) {
			QETH_CARD_TEXT(card, 2, "outofbuf");
			qeth_schedule_recovery(card);
		}

		return;
	}

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
	qeth_clear_output_buffer(queue, buffer, error, budget);
}

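/*
 * NAPI handler for IQD TX completion: inspect the output queue for
 * completed buffers, account them to the stack via
 * netdev_tx_completed_queue(), and wake the txq if xmit had stopped it
 * on a full queue. An empty queue completes NAPI; when nothing has
 * completed but work is still pending, the completion timer is armed
 * instead, so forward progress is guaranteed without burning CPU.
 */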
static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));

	while (1) {
		unsigned int start, error, i;
		unsigned int packets = 0;
		unsigned int bytes = 0;
		int completed;

		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
					       &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
			if (napi_complete_done(napi, 0))
				qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS);
			return 0;
		}

		for (i = start; i < start + completed; i++) {
			struct qeth_qdio_out_buffer *buffer;
			unsigned int bidx = QDIO_BUFNR(i);

			buffer = queue->bufs[bidx];
			packets += buffer->frames;
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
			qeth_iqd_tx_complete(queue, bidx, error, budget);
			qeth_cleanup_handled_pending(queue, bidx, false);
		}

		netdev_tx_completed_queue(txq, packets, bytes);
		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In which case the code below won't trigger.
		 * So before returning, xmit will re-check the txq's fill level
		 * and wake it up if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}

static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}

int qeth_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	QETH_CARD_TEXT(card, 4, "defadpcb");

	if (cmd->hdr.return_code)
		return -EIO;

	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);

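/*
 * Build a SETASSPARMS command buffer for the given assist function
 * (@ipa_func) and sub-command (@cmd_code), reserving @data_length bytes
 * of command-specific payload behind the setassparms header.
 */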
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
						 u16 cmd_code,
						 unsigned int data_length,
						 enum qeth_prot_versions prot)
{
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  data));
	if (!iob)
		return NULL;

	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;

	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);

int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
				      u16 cmd_code, u32 *data,
				      enum qeth_prot_versions prot)
{
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
	if (!iob)
		return -ENOMEM;

	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);

static void qeth_unregister_dbf_views(void)
{
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
}

void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
	char dbf_txt_buf[32];
	va_list args;

	if (!debug_level_enabled(id, level))
		return;
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);

static int qeth_register_dbf_views(void)
{
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}

		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}

		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}

	return 0;
}

static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */

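/*
 * Bind the card to a discipline (layer 2 or layer 3). The disciplines
 * live in separate modules (qeth_l2/qeth_l3); symbol_get() takes a
 * reference so the module cannot be unloaded while in use, and
 * request_module() loads it on demand if it is not present yet.
 */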
int qeth_core_load_discipline(struct qeth_card *card,
			      enum qeth_discipline_id discipline)
{
	mutex_lock(&qeth_mod_mutex);
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
		break;
	case QETH_DISCIPLINE_LAYER2:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
		break;
	default:
		break;
	}
	mutex_unlock(&qeth_mod_mutex);

	if (!card->discipline) {
		dev_err(&card->gdev->dev,
			"There is no kernel module to support discipline %d\n",
			discipline);
		return -EINVAL;
	}

	card->options.layer = discipline;
	return 0;
}

void qeth_core_free_discipline(struct qeth_card *card)
{
	if (IS_LAYER2(card))
		symbol_put(qeth_l2_discipline);
	else
		symbol_put(qeth_l3_discipline);
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
	card->discipline = NULL;
}

const struct device_type qeth_generic_devtype = {
	.name = "qeth_generic",
	.groups = qeth_generic_attr_groups,
};
EXPORT_SYMBOL_GPL(qeth_generic_devtype);

static const struct device_type qeth_osn_devtype = {
	.name = "qeth_osn",
	.groups = qeth_osn_attr_groups,
};

#define DBF_NAME_LEN	20

struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);

static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}

static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
	struct qeth_dbf_entry *new_entry;

	card->debug = debug_register(name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		goto err;
	}
	if (debug_register_view(card->debug, &debug_hex_ascii_view))
		goto err_dbg;
	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
	if (!new_entry)
		goto err_dbg;
	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
	new_entry->dbf_info = card->debug;
	mutex_lock(&qeth_dbf_list_mutex);
	list_add(&new_entry->dbf_list, &qeth_dbf_list);
	mutex_unlock(&qeth_dbf_list_mutex);

	return 0;

err_dbg:
	debug_unregister(card->debug);
err:
	return -ENOMEM;
}

static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}

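/*
 * Allocate the net_device matching the card type: IQD devices get a
 * multi-queue "hsi%d" device, OSN a bare "osn%d" device, OSM a
 * single-queue ethernet device, and everything else a standard
 * multi-queue ethernet device. The MTU limits are left at 0 here and
 * initialized when the device first goes online.
 */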
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;
	struct qeth_priv *priv;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
		break;
	case QETH_CARD_TYPE_OSM:
		dev = alloc_etherdev(sizeof(*priv));
		break;
	case QETH_CARD_TYPE_OSN:
		dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN,
				   ether_setup);
		break;
	default:
		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
	}

	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	priv->rx_copybreak = QETH_RX_COPYBREAK;

	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
	dev->min_mtu = IS_OSN(card) ? 64 : 576;
	/* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);

	dev->ethtool_ops = IS_OSN(card) ? &qeth_osn_ethtool_ops :
					  &qeth_ethtool_ops;

	return dev;
}

struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);

	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}

int qeth_setup_netdev(struct qeth_card *card)
{
	struct net_device *dev = card->dev;
	unsigned int num_tx_queues;

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->hw_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_SG;

	if (IS_IQD(card)) {
		dev->features |= NETIF_F_SG;
		num_tx_queues = QETH_IQD_MIN_TXQ;
	} else if (IS_VM_NIC(card)) {
		num_tx_queues = 1;
	} else {
		num_tx_queues = dev->real_num_tx_queues;
	}

	return qeth_set_real_num_tx_queues(card, num_tx_queues);
}
EXPORT_SYMBOL_GPL(qeth_setup_netdev);

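/*
 * ccwgroup probe: allocate the card and its net_device, set up the
 * per-card debug area, and query the device's capabilities. When the
 * hardware enforces a discipline, it is loaded and set up right away;
 * otherwise discipline selection is deferred until the device is set
 * online.
 */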
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	enum qeth_discipline_id enforced_disc;
	char dbf_name[DBF_NAME_LEN];

	QETH_DBF_TEXT(SETUP, 2, "probedev");

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

	card = qeth_alloc_card(gdev);
	if (!card) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}

	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		 dev_name(&gdev->dev));
	card->debug = qeth_get_dbf_entry(dbf_name);
	if (!card->debug) {
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
	}

	qeth_setup_card(card);
	card->dev = qeth_alloc_netdev(card);
	if (!card->dev) {
		rc = -ENOMEM;
		goto err_card;
	}

	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;

	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
		rc = qeth_core_load_discipline(card, enforced_disc);
		if (rc)
			goto err_load;

		gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
						card->discipline->devtype;
		rc = card->discipline->setup(card->gdev);
		if (rc)
			goto err_disc;
		break;
	}

	return 0;

err_disc:
	qeth_core_free_discipline(card);
err_load:
err_chp_desc:
	free_netdev(card->dev);
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}

static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 2, "removedv");

	if (card->discipline) {
		card->discipline->remove(gdev);
		qeth_core_free_discipline(card);
	}

	qeth_free_qdio_queues(card);

	free_netdev(card->dev);
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}

static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_discipline_id def_discipline;

	if (!card->discipline) {
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
		rc = qeth_core_load_discipline(card, def_discipline);
		if (rc)
			goto err;
		rc = card->discipline->setup(card->gdev);
		if (rc) {
			qeth_core_free_discipline(card);
			goto err;
		}
	}

	rc = qeth_set_online(card);
err:
	return rc;
}

static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	return qeth_set_offline(card, false);
}

static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qdio_free(CARD_DDEV(card));
}

static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
{
	int err;

	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);

	return err ? err : count;
}
static DRIVER_ATTR_WO(group);

static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};

static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};

struct qeth_card *qeth_get_card_by_busid(char *bus_id)
{
	struct ccwgroup_device *gdev;
	struct qeth_card *card;

	gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
	if (!gdev)
		return NULL;

	card = dev_get_drvdata(&gdev->dev);
	put_device(&gdev->dev);
	return card;
}
EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);

int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
			return 1;
		return 0;
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
		break;
	default:
		if (card->discipline->do_ioctl)
			rc = card->discipline->do_ioctl(dev, rq, cmd);
		else
			rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);

static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u32 *features = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	*features = cmd->data.setassparms.data.flags_32bit;
	return 0;
}

static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
}

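/*
 * Enable checksum offload in a three-step IPA dialog: START queries the
 * supported features, ENABLE requests the required ones, and the
 * returned caps are checked to confirm they are both supported and
 * enabled. Any failure along the way rolls the assist back via
 * qeth_set_csum_off().
 */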
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			    enum qeth_prot_versions prot, u8 *lp2lp)
{
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
	int rc;

	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
		return rc;

	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
				       prot);
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
	}

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_csum_off(card, cstype, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);

	if (lp2lp)
		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);

	return 0;
}

static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot, u8 *lp2lp)
{
	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
		    qeth_set_csum_off(card, cstype, prot);
}

static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}

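/*
 * Enable TSO through the same START/ENABLE sequence as checksum
 * offload: START reports the supported MSS and capabilities, ENABLE
 * switches on QETH_IPA_LARGE_SEND_TCP, and the reply is verified before
 * the feature is considered active. Failures disable the assist again.
 */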
static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}

static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}

static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4, NULL);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6, NULL);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}

/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);

static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}

int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return 0;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);

netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);

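/*
 * ndo_features_check: downgrade offloads per-skb. Checksum offload and
 * TSO are withheld for traffic whose next hop is local to the card
 * (unless the hardware handles LP2LP checksumming), and small GSO
 * segments are linearized so that each segment consumes a single
 * buffer element.
 */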
netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		struct qeth_card *card = dev->ml_priv;
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);

void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

#define TC_IQD_UCAST	0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}

int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}

u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);

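/*
 * ndo_open/ndo_stop: manage the NAPI instances. The RX instance lives
 * in the card; IQD devices additionally run one TX-completion NAPI
 * instance per output queue, which is registered on open and torn down
 * on stop (the queues may get re-allocated in between).
 */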
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	napi_enable(&card->napi);
	local_bh_disable();
	napi_schedule(&card->napi);
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
					  QETH_NAPI_WEIGHT);
			napi_enable(&queue->napi);
			napi_schedule(&queue->napi);
		}
	}
	/* kick-start the NAPI softirq: */
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);

int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethstop");
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		/* Quiesce the NAPI instances: */
		qeth_for_each_output_queue(card, queue, i)
			napi_disable(&queue->napi);

		/* Stop .ndo_start_xmit, might still access queue->napi. */
		netif_tx_disable(dev);

		qeth_for_each_output_queue(card, queue, i) {
			del_timer_sync(&queue->timer);
			/* Queues may get re-allocated, so remove the NAPIs. */
			netif_napi_del(&queue->napi);
		}
	} else {
		netif_tx_disable(dev);
	}

	napi_disable(&card->napi);
	qdio_stop_irq(CARD_DDEV(card));

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}

static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");