drivers/s390/net/qeth_core_main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *    Copyright IBM Corp. 2007, 2009
4  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
5  *               Frank Pavlic <fpavlic@de.ibm.com>,
6  *               Thomas Spatzier <tspat@de.ibm.com>,
7  *               Frank Blaschka <frank.blaschka@de.ibm.com>
8  */
9
10 #define KMSG_COMPONENT "qeth"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12
13 #include <linux/compat.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/kernel.h>
19 #include <linux/log2.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/mii.h>
23 #include <linux/kthread.h>
24 #include <linux/slab.h>
25 #include <linux/if_vlan.h>
26 #include <linux/netdevice.h>
27 #include <linux/netdev_features.h>
28 #include <linux/skbuff.h>
29 #include <linux/vmalloc.h>
30
31 #include <net/iucv/af_iucv.h>
32 #include <net/dsfield.h>
33
34 #include <asm/ebcdic.h>
35 #include <asm/chpid.h>
36 #include <asm/io.h>
37 #include <asm/sysinfo.h>
38 #include <asm/diag.h>
39 #include <asm/cio.h>
40 #include <asm/ccwdev.h>
41 #include <asm/cpcmd.h>
42
43 #include "qeth_core.h"
44
45 struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
46         /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
47         /*                   N  P  A    M  L  V                      H  */
48         [QETH_DBF_SETUP] = {"qeth_setup",
49                                 8, 1,   8, 5, &debug_hex_ascii_view, NULL},
50         [QETH_DBF_MSG]   = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
51                             &debug_sprintf_view, NULL},
52         [QETH_DBF_CTRL]  = {"qeth_control",
53                 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
54 };
55 EXPORT_SYMBOL_GPL(qeth_dbf);
56
57 struct kmem_cache *qeth_core_header_cache;
58 EXPORT_SYMBOL_GPL(qeth_core_header_cache);
59 static struct kmem_cache *qeth_qdio_outbuf_cache;
60
61 static struct device *qeth_core_root_dev;
62 static struct lock_class_key qdio_out_skb_queue_key;
63
64 static void qeth_send_control_data_cb(struct qeth_card *card,
65                                       struct qeth_channel *channel,
66                                       struct qeth_cmd_buffer *iob);
67 static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
68 static void qeth_free_buffer_pool(struct qeth_card *);
69 static int qeth_qdio_establish(struct qeth_card *);
70 static void qeth_free_qdio_buffers(struct qeth_card *);
71 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
72                 struct qeth_qdio_out_buffer *buf,
73                 enum iucv_tx_notify notification);
74 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
75 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
76
77 static void qeth_close_dev_handler(struct work_struct *work)
78 {
79         struct qeth_card *card;
80
81         card = container_of(work, struct qeth_card, close_dev_work);
82         QETH_CARD_TEXT(card, 2, "cldevhdl");
83         ccwgroup_set_offline(card->gdev);
84 }
85
86 static const char *qeth_get_cardname(struct qeth_card *card)
87 {
88         if (card->info.guestlan) {
89                 switch (card->info.type) {
90                 case QETH_CARD_TYPE_OSD:
91                         return " Virtual NIC QDIO";
92                 case QETH_CARD_TYPE_IQD:
93                         return " Virtual NIC Hiper";
94                 case QETH_CARD_TYPE_OSM:
95                         return " Virtual NIC QDIO - OSM";
96                 case QETH_CARD_TYPE_OSX:
97                         return " Virtual NIC QDIO - OSX";
98                 default:
99                         return " unknown";
100                 }
101         } else {
102                 switch (card->info.type) {
103                 case QETH_CARD_TYPE_OSD:
104                         return " OSD Express";
105                 case QETH_CARD_TYPE_IQD:
106                         return " HiperSockets";
107                 case QETH_CARD_TYPE_OSN:
108                         return " OSN QDIO";
109                 case QETH_CARD_TYPE_OSM:
110                         return " OSM QDIO";
111                 case QETH_CARD_TYPE_OSX:
112                         return " OSX QDIO";
113                 default:
114                         return " unknown";
115                 }
116         }
117         return " n/a";
118 }
119
120 /* max length to be returned: 14 */
121 const char *qeth_get_cardname_short(struct qeth_card *card)
122 {
123         if (card->info.guestlan) {
124                 switch (card->info.type) {
125                 case QETH_CARD_TYPE_OSD:
126                         return "Virt.NIC QDIO";
127                 case QETH_CARD_TYPE_IQD:
128                         return "Virt.NIC Hiper";
129                 case QETH_CARD_TYPE_OSM:
130                         return "Virt.NIC OSM";
131                 case QETH_CARD_TYPE_OSX:
132                         return "Virt.NIC OSX";
133                 default:
134                         return "unknown";
135                 }
136         } else {
137                 switch (card->info.type) {
138                 case QETH_CARD_TYPE_OSD:
139                         switch (card->info.link_type) {
140                         case QETH_LINK_TYPE_FAST_ETH:
141                                 return "OSD_100";
142                         case QETH_LINK_TYPE_HSTR:
143                                 return "HSTR";
144                         case QETH_LINK_TYPE_GBIT_ETH:
145                                 return "OSD_1000";
146                         case QETH_LINK_TYPE_10GBIT_ETH:
147                                 return "OSD_10GIG";
148                         case QETH_LINK_TYPE_25GBIT_ETH:
149                                 return "OSD_25GIG";
150                         case QETH_LINK_TYPE_LANE_ETH100:
151                                 return "OSD_FE_LANE";
152                         case QETH_LINK_TYPE_LANE_TR:
153                                 return "OSD_TR_LANE";
154                         case QETH_LINK_TYPE_LANE_ETH1000:
155                                 return "OSD_GbE_LANE";
156                         case QETH_LINK_TYPE_LANE:
157                                 return "OSD_ATM_LANE";
158                         default:
159                                 return "OSD_Express";
160                         }
161                 case QETH_CARD_TYPE_IQD:
162                         return "HiperSockets";
163                 case QETH_CARD_TYPE_OSN:
164                         return "OSN";
165                 case QETH_CARD_TYPE_OSM:
166                         return "OSM_1000";
167                 case QETH_CARD_TYPE_OSX:
168                         return "OSX_10GIG";
169                 default:
170                         return "unknown";
171                 }
172         }
173         return "n/a";
174 }
175
176 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
177                          int clear_start_mask)
178 {
179         unsigned long flags;
180
181         spin_lock_irqsave(&card->thread_mask_lock, flags);
182         card->thread_allowed_mask = threads;
183         if (clear_start_mask)
184                 card->thread_start_mask &= threads;
185         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
186         wake_up(&card->wait_q);
187 }
188 EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
189
190 int qeth_threads_running(struct qeth_card *card, unsigned long threads)
191 {
192         unsigned long flags;
193         int rc = 0;
194
195         spin_lock_irqsave(&card->thread_mask_lock, flags);
196         rc = (card->thread_running_mask & threads);
197         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
198         return rc;
199 }
200 EXPORT_SYMBOL_GPL(qeth_threads_running);
201
202 void qeth_clear_working_pool_list(struct qeth_card *card)
203 {
204         struct qeth_buffer_pool_entry *pool_entry, *tmp;
205
206         QETH_CARD_TEXT(card, 5, "clwrklst");
207         list_for_each_entry_safe(pool_entry, tmp,
208                                  &card->qdio.in_buf_pool.entry_list, list) {
209                 list_del(&pool_entry->list);
210         }
211 }
212 EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
213
214 static int qeth_alloc_buffer_pool(struct qeth_card *card)
215 {
216         struct qeth_buffer_pool_entry *pool_entry;
217         void *ptr;
218         int i, j;
219
220         QETH_CARD_TEXT(card, 5, "alocpool");
221         for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
222                 pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
223                 if (!pool_entry) {
224                         qeth_free_buffer_pool(card);
225                         return -ENOMEM;
226                 }
227                 for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
228                         ptr = (void *) __get_free_page(GFP_KERNEL);
229                         if (!ptr) {
230                                 while (j > 0)
231                                         free_page((unsigned long)
232                                                   pool_entry->elements[--j]);
233                                 kfree(pool_entry);
234                                 qeth_free_buffer_pool(card);
235                                 return -ENOMEM;
236                         }
237                         pool_entry->elements[j] = ptr;
238                 }
239                 list_add(&pool_entry->init_list,
240                          &card->qdio.init_pool.entry_list);
241         }
242         return 0;
243 }
244
245 int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
246 {
247         QETH_CARD_TEXT(card, 2, "realcbp");
248
249         if (card->state != CARD_STATE_DOWN)
250                 return -EPERM;
251
252         /* TODO: steal/add buffers from/to a running card's buffer pool (?) */
253         qeth_clear_working_pool_list(card);
254         qeth_free_buffer_pool(card);
255         card->qdio.in_buf_pool.buf_count = bufcnt;
256         card->qdio.init_pool.buf_count = bufcnt;
257         return qeth_alloc_buffer_pool(card);
258 }
259 EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
260
261 static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
262 {
263         if (!q)
264                 return;
265
266         qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
267         kfree(q);
268 }
269
270 static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
271 {
272         struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
273         int i;
274
275         if (!q)
276                 return NULL;
277
278         if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
279                 kfree(q);
280                 return NULL;
281         }
282
283         for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
284                 q->bufs[i].buffer = q->qdio_bufs[i];
285
286         QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
287         return q;
288 }
289
290 static int qeth_cq_init(struct qeth_card *card)
291 {
292         int rc;
293
294         if (card->options.cq == QETH_CQ_ENABLED) {
295                 QETH_DBF_TEXT(SETUP, 2, "cqinit");
296                 qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
297                                    QDIO_MAX_BUFFERS_PER_Q);
298                 card->qdio.c_q->next_buf_to_init = 127;
299                 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
300                              card->qdio.no_in_queues - 1, 0,
301                              127);
302                 if (rc) {
303                         QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
304                         goto out;
305                 }
306         }
307         rc = 0;
308 out:
309         return rc;
310 }
311
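/* Set up the completion queue (CQ): when CQ mode is enabled, allocate the
 * extra input queue and the per-buffer output states; otherwise fall back
 * to a single input queue.
 */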
312 static int qeth_alloc_cq(struct qeth_card *card)
313 {
314         int rc;
315
316         if (card->options.cq == QETH_CQ_ENABLED) {
317                 int i;
318                 struct qdio_outbuf_state *outbuf_states;
319
320                 QETH_DBF_TEXT(SETUP, 2, "cqon");
321                 card->qdio.c_q = qeth_alloc_qdio_queue();
322                 if (!card->qdio.c_q) {
323                         rc = -1;
324                         goto kmsg_out;
325                 }
326                 card->qdio.no_in_queues = 2;
327                 card->qdio.out_bufstates =
328                         kcalloc(card->qdio.no_out_queues *
329                                         QDIO_MAX_BUFFERS_PER_Q,
330                                 sizeof(struct qdio_outbuf_state),
331                                 GFP_KERNEL);
332                 outbuf_states = card->qdio.out_bufstates;
333                 if (outbuf_states == NULL) {
334                         rc = -1;
335                         goto free_cq_out;
336                 }
337                 for (i = 0; i < card->qdio.no_out_queues; ++i) {
338                         card->qdio.out_qs[i]->bufstates = outbuf_states;
339                         outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
340                 }
341         } else {
342                 QETH_DBF_TEXT(SETUP, 2, "nocq");
343                 card->qdio.c_q = NULL;
344                 card->qdio.no_in_queues = 1;
345         }
346         QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues);
347         rc = 0;
348 out:
349         return rc;
350 free_cq_out:
351         qeth_free_qdio_queue(card->qdio.c_q);
352         card->qdio.c_q = NULL;
353 kmsg_out:
354         dev_err(&card->gdev->dev, "Failed to create completion queue\n");
355         goto out;
356 }
357
358 static void qeth_free_cq(struct qeth_card *card)
359 {
360         if (card->qdio.c_q) {
361                 --card->qdio.no_in_queues;
362                 qeth_free_qdio_queue(card->qdio.c_q);
363                 card->qdio.c_q = NULL;
364         }
365         kfree(card->qdio.out_bufstates);
366         card->qdio.out_bufstates = NULL;
367 }
368
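/* Translate the SBALF 15 completion code of a TX buffer into the
 * corresponding af_iucv transmit notification.
 */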
369 static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
370                                                         int delayed)
371 {
372         enum iucv_tx_notify n;
373
374         switch (sbalf15) {
375         case 0:
376                 n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
377                 break;
378         case 4:
379         case 16:
380         case 17:
381         case 18:
382                 n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
383                         TX_NOTIFY_UNREACHABLE;
384                 break;
385         default:
386                 n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
387                         TX_NOTIFY_GENERALERROR;
388                 break;
389         }
390
391         return n;
392 }
393
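/* Walk the chain of pending TX buffers attached to bufs[bidx] and free
 * every buffer that has already been handled via the completion queue
 * (or all of them when forced_cleanup is set, e.g. during recovery).
 */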
394 static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
395                                          int forced_cleanup)
396 {
397         if (q->card->options.cq != QETH_CQ_ENABLED)
398                 return;
399
400         if (q->bufs[bidx]->next_pending != NULL) {
401                 struct qeth_qdio_out_buffer *head = q->bufs[bidx];
402                 struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
403
404                 while (c) {
405                         if (forced_cleanup ||
406                             atomic_read(&c->state) ==
407                               QETH_QDIO_BUF_HANDLED_DELAYED) {
408                                 struct qeth_qdio_out_buffer *f = c;
409                                 QETH_CARD_TEXT(f->q->card, 5, "fp");
410                                 QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
411                                 /* release here to avoid interleaving between
412                                    outbound tasklet and inbound tasklet
413                                    regarding notifications and lifecycle */
414                                 qeth_release_skbs(c);
415
416                                 c = f->next_pending;
417                                 WARN_ON_ONCE(head->next_pending != f);
418                                 head->next_pending = c;
419                                 kmem_cache_free(qeth_qdio_outbuf_cache, f);
420                         } else {
421                                 head = c;
422                                 c = c->next_pending;
423                         }
424
425                 }
426         }
427         if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
428                                         QETH_QDIO_BUF_HANDLED_DELAYED)) {
429                 /* for recovery situations */
430                 qeth_init_qdio_out_buf(q, bidx);
431                 QETH_CARD_TEXT(q->card, 2, "clprecov");
432         }
433 }
434
435
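/* Handle an asynchronous operation block (AOB) received on the completion
 * queue: notify the af_iucv sockets attached to the TX buffer, free any
 * header elements that are still owned by the driver, and release the AOB.
 */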
436 static void qeth_qdio_handle_aob(struct qeth_card *card,
437                                  unsigned long phys_aob_addr)
438 {
439         struct qaob *aob;
440         struct qeth_qdio_out_buffer *buffer;
441         enum iucv_tx_notify notification;
442         unsigned int i;
443
444         aob = (struct qaob *) phys_to_virt(phys_aob_addr);
445         QETH_CARD_TEXT(card, 5, "haob");
446         QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
447         buffer = (struct qeth_qdio_out_buffer *) aob->user1;
448         QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
449
450         if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
451                            QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
452                 notification = TX_NOTIFY_OK;
453         } else {
454                 WARN_ON_ONCE(atomic_read(&buffer->state) !=
455                                                         QETH_QDIO_BUF_PENDING);
456                 atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
457                 notification = TX_NOTIFY_DELAYED_OK;
458         }
459
460         if (aob->aorc != 0)  {
461                 QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
462                 notification = qeth_compute_cq_notification(aob->aorc, 1);
463         }
464         qeth_notify_skbs(buffer->q, buffer, notification);
465
466         /* Free dangling allocations. The attached skbs are handled by
467          * qeth_cleanup_handled_pending().
468          */
469         for (i = 0;
470              i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
471              i++) {
472                 if (aob->sba[i] && buffer->is_header[i])
473                         kmem_cache_free(qeth_core_header_cache,
474                                         (void *) aob->sba[i]);
475         }
476         atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
477
478         qdio_release_aob(aob);
479 }
480
481 static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
482 {
483         return card->options.cq == QETH_CQ_ENABLED &&
484             card->qdio.c_q != NULL &&
485             queue != 0 &&
486             queue == card->qdio.no_in_queues - 1;
487 }
488
489 static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u32 len, void *data)
490 {
491         ccw->cmd_code = cmd_code;
492         ccw->flags = CCW_FLAG_SLI;
493         ccw->count = len;
494         ccw->cda = (__u32) __pa(data);
495 }
496
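/* Start the next READ ccw on the read channel. The caller holds the
 * ccwdev lock of the read device; qeth_issue_next_read() is the locked
 * wrapper.
 */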
497 static int __qeth_issue_next_read(struct qeth_card *card)
498 {
499         struct qeth_channel *channel = &card->read;
500         struct qeth_cmd_buffer *iob;
501         int rc;
502
503         QETH_CARD_TEXT(card, 5, "issnxrd");
504         if (channel->state != CH_STATE_UP)
505                 return -EIO;
506         iob = qeth_get_buffer(channel);
507         if (!iob) {
508                 dev_warn(&card->gdev->dev, "The qeth device driver "
509                         "failed to recover an error on the device\n");
510                 QETH_DBF_MESSAGE(2, "issue_next_read on device %x failed: no iob available\n",
511                                  CARD_DEVID(card));
512                 return -ENOMEM;
513         }
514         qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
515         QETH_CARD_TEXT(card, 6, "noirqpnd");
516         rc = ccw_device_start(channel->ccwdev, channel->ccw,
517                               (addr_t) iob, 0, 0);
518         if (rc) {
519                 QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
520                                  rc, CARD_DEVID(card));
521                 atomic_set(&channel->irq_pending, 0);
522                 qeth_release_buffer(channel, iob);
523                 card->read_or_write_problem = 1;
524                 qeth_schedule_recovery(card);
525                 wake_up(&card->wait_q);
526         }
527         return rc;
528 }
529
530 static int qeth_issue_next_read(struct qeth_card *card)
531 {
532         int ret;
533
534         spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
535         ret = __qeth_issue_next_read(card);
536         spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
537
538         return ret;
539 }
540
541 static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
542 {
543         struct qeth_reply *reply;
544
545         reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
546         if (reply) {
547                 refcount_set(&reply->refcnt, 1);
548                 atomic_set(&reply->received, 0);
549                 init_waitqueue_head(&reply->wait_q);
550         }
551         return reply;
552 }
553
554 static void qeth_get_reply(struct qeth_reply *reply)
555 {
556         refcount_inc(&reply->refcnt);
557 }
558
559 static void qeth_put_reply(struct qeth_reply *reply)
560 {
561         if (refcount_dec_and_test(&reply->refcnt))
562                 kfree(reply);
563 }
564
565 static void qeth_enqueue_reply(struct qeth_card *card, struct qeth_reply *reply)
566 {
567         spin_lock_irq(&card->lock);
568         list_add_tail(&reply->list, &card->cmd_waiter_list);
569         spin_unlock_irq(&card->lock);
570 }
571
572 static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply)
573 {
574         spin_lock_irq(&card->lock);
575         list_del(&reply->list);
576         spin_unlock_irq(&card->lock);
577 }
578
579 static void qeth_notify_reply(struct qeth_reply *reply)
580 {
581         atomic_inc(&reply->received);
582         wake_up(&reply->wait_q);
583 }
584
585 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
586                 struct qeth_card *card)
587 {
588         const char *ipa_name;
589         int com = cmd->hdr.command;
590         ipa_name = qeth_get_ipa_cmd_name(com);
591
592         if (rc)
593                 QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
594                                  ipa_name, com, CARD_DEVID(card), rc,
595                                  qeth_get_ipa_msg(rc));
596         else
597                 QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
598                                  ipa_name, com, CARD_DEVID(card));
599 }
600
601 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
602                                                 struct qeth_ipa_cmd *cmd)
603 {
604         QETH_CARD_TEXT(card, 5, "chkipad");
605
606         if (IS_IPA_REPLY(cmd)) {
607                 if (cmd->hdr.command != IPA_CMD_SETCCID &&
608                     cmd->hdr.command != IPA_CMD_DELCCID &&
609                     cmd->hdr.command != IPA_CMD_MODCCID &&
610                     cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
611                         qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
612                 return cmd;
613         }
614
615         /* handle unsolicited event: */
616         switch (cmd->hdr.command) {
617         case IPA_CMD_STOPLAN:
618                 if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
619                         dev_err(&card->gdev->dev,
620                                 "Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
621                                 QETH_CARD_IFNAME(card));
622                         schedule_work(&card->close_dev_work);
623                 } else {
624                         dev_warn(&card->gdev->dev,
625                                  "The link for interface %s on CHPID 0x%X failed\n",
626                                  QETH_CARD_IFNAME(card), card->info.chpid);
627                         qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
628                         netif_carrier_off(card->dev);
629                 }
630                 return NULL;
631         case IPA_CMD_STARTLAN:
632                 dev_info(&card->gdev->dev,
633                          "The link for %s on CHPID 0x%X has been restored\n",
634                          QETH_CARD_IFNAME(card), card->info.chpid);
635                 if (card->info.hwtrap)
636                         card->info.hwtrap = 2;
637                 qeth_schedule_recovery(card);
638                 return NULL;
639         case IPA_CMD_SETBRIDGEPORT_IQD:
640         case IPA_CMD_SETBRIDGEPORT_OSA:
641         case IPA_CMD_ADDRESS_CHANGE_NOTIF:
642                 if (card->discipline->control_event_handler(card, cmd))
643                         return cmd;
644                 return NULL;
645         case IPA_CMD_MODCCID:
646                 return cmd;
647         case IPA_CMD_REGISTER_LOCAL_ADDR:
648                 QETH_CARD_TEXT(card, 3, "irla");
649                 return NULL;
650         case IPA_CMD_UNREGISTER_LOCAL_ADDR:
651                 QETH_CARD_TEXT(card, 3, "urla");
652                 return NULL;
653         default:
654                 QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
655                 return cmd;
656         }
657 }
658
659 void qeth_clear_ipacmd_list(struct qeth_card *card)
660 {
661         struct qeth_reply *reply;
662         unsigned long flags;
663
664         QETH_CARD_TEXT(card, 4, "clipalst");
665
666         spin_lock_irqsave(&card->lock, flags);
667         list_for_each_entry(reply, &card->cmd_waiter_list, list) {
668                 reply->rc = -EIO;
669                 qeth_notify_reply(reply);
670         }
671         spin_unlock_irqrestore(&card->lock, flags);
672 }
673 EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
674
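/* Check a control-channel buffer for an IDX TERMINATE indication.
 * Returns 0 if the response is usable, -EPERM or -EIO otherwise.
 */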
675 static int qeth_check_idx_response(struct qeth_card *card,
676         unsigned char *buffer)
677 {
678         if (!buffer)
679                 return 0;
680
681         QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
682         if ((buffer[2] & 0xc0) == 0xc0) {
683                 QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
684                                  buffer[4]);
685                 QETH_CARD_TEXT(card, 2, "ckidxres");
686                 QETH_CARD_TEXT(card, 2, " idxterm");
687                 QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
688                 if (buffer[4] == 0xf6) {
689                         dev_err(&card->gdev->dev,
690                         "The qeth device is not configured "
691                         "for the OSI layer required by z/VM\n");
692                         return -EPERM;
693                 }
694                 return -EIO;
695         }
696         return 0;
697 }
698
699 static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
700 {
701         __u8 index;
702
703         index = channel->io_buf_no;
704         do {
705                 if (channel->iob[index].state == BUF_STATE_FREE) {
706                         channel->iob[index].state = BUF_STATE_LOCKED;
707                         channel->io_buf_no = (channel->io_buf_no + 1) %
708                                 QETH_CMD_BUFFER_NO;
709                         memset(channel->iob[index].data, 0, QETH_BUFSIZE);
710                         return channel->iob + index;
711                 }
712                 index = (index + 1) % QETH_CMD_BUFFER_NO;
713         } while (index != channel->io_buf_no);
714
715         return NULL;
716 }
717
718 void qeth_release_buffer(struct qeth_channel *channel,
719                 struct qeth_cmd_buffer *iob)
720 {
721         unsigned long flags;
722
723         spin_lock_irqsave(&channel->iob_lock, flags);
724         iob->state = BUF_STATE_FREE;
725         iob->callback = qeth_send_control_data_cb;
726         if (iob->reply) {
727                 qeth_put_reply(iob->reply);
728                 iob->reply = NULL;
729         }
730         spin_unlock_irqrestore(&channel->iob_lock, flags);
731         wake_up(&channel->wait_q);
732 }
733 EXPORT_SYMBOL_GPL(qeth_release_buffer);
734
735 static void qeth_release_buffer_cb(struct qeth_card *card,
736                                    struct qeth_channel *channel,
737                                    struct qeth_cmd_buffer *iob)
738 {
739         qeth_release_buffer(channel, iob);
740 }
741
742 static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
743 {
744         struct qeth_reply *reply = iob->reply;
745
746         if (reply) {
747                 reply->rc = rc;
748                 qeth_notify_reply(reply);
749         }
750         qeth_release_buffer(iob->channel, iob);
751 }
752
753 static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
754 {
755         struct qeth_cmd_buffer *buffer = NULL;
756         unsigned long flags;
757
758         spin_lock_irqsave(&channel->iob_lock, flags);
759         buffer = __qeth_get_buffer(channel);
760         spin_unlock_irqrestore(&channel->iob_lock, flags);
761         return buffer;
762 }
763
764 struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
765 {
766         struct qeth_cmd_buffer *buffer;
767         wait_event(channel->wait_q,
768                    ((buffer = qeth_get_buffer(channel)) != NULL));
769         return buffer;
770 }
771 EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);
772
773 void qeth_clear_cmd_buffers(struct qeth_channel *channel)
774 {
775         int cnt;
776
777         for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
778                 qeth_release_buffer(channel, &channel->iob[cnt]);
779         channel->io_buf_no = 0;
780 }
781 EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
782
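/* Completion callback for buffers read from the control channel: check the
 * IDX/IPA response, filter out unsolicited events, and match the reply
 * against the commands waiting on cmd_waiter_list.
 */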
783 static void qeth_send_control_data_cb(struct qeth_card *card,
784                                       struct qeth_channel *channel,
785                                       struct qeth_cmd_buffer *iob)
786 {
787         struct qeth_ipa_cmd *cmd = NULL;
788         struct qeth_reply *reply = NULL;
789         struct qeth_reply *r;
790         unsigned long flags;
791         int rc = 0;
792
793         QETH_CARD_TEXT(card, 4, "sndctlcb");
794         rc = qeth_check_idx_response(card, iob->data);
795         switch (rc) {
796         case 0:
797                 break;
798         case -EIO:
799                 qeth_clear_ipacmd_list(card);
800                 qeth_schedule_recovery(card);
801                 /* fall through */
802         default:
803                 goto out;
804         }
805
806         if (IS_IPA(iob->data)) {
807                 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
808                 cmd = qeth_check_ipa_data(card, cmd);
809                 if (!cmd)
810                         goto out;
811                 if (IS_OSN(card) && card->osn_info.assist_cb &&
812                     cmd->hdr.command != IPA_CMD_STARTLAN) {
813                         card->osn_info.assist_cb(card->dev, cmd);
814                         goto out;
815                 }
816         } else {
817                 /* non-IPA commands should only flow during initialization */
818                 if (card->state != CARD_STATE_DOWN)
819                         goto out;
820         }
821
822         /* match against pending cmd requests */
823         spin_lock_irqsave(&card->lock, flags);
824         list_for_each_entry(r, &card->cmd_waiter_list, list) {
825                 if ((r->seqno == QETH_IDX_COMMAND_SEQNO) ||
826                     (cmd && (r->seqno == cmd->hdr.seqno))) {
827                         reply = r;
828                         /* take the object outside the lock */
829                         qeth_get_reply(reply);
830                         break;
831                 }
832         }
833         spin_unlock_irqrestore(&card->lock, flags);
834
835         if (!reply)
836                 goto out;
837
838         if (!reply->callback) {
839                 rc = 0;
840         } else {
841                 if (cmd) {
842                         reply->offset = (u16)((char *)cmd - (char *)iob->data);
843                         rc = reply->callback(card, reply, (unsigned long)cmd);
844                 } else {
845                         rc = reply->callback(card, reply, (unsigned long)iob);
846                 }
847         }
848
849         if (rc <= 0) {
850                 reply->rc = rc;
851                 qeth_notify_reply(reply);
852         }
853
854         qeth_put_reply(reply);
855
856 out:
857         memcpy(&card->seqno.pdu_hdr_ack,
858                 QETH_PDU_HEADER_SEQ_NO(iob->data),
859                 QETH_SEQ_NO_LENGTH);
860         qeth_release_buffer(channel, iob);
861 }
862
863 static int qeth_set_thread_start_bit(struct qeth_card *card,
864                 unsigned long thread)
865 {
866         unsigned long flags;
867
868         spin_lock_irqsave(&card->thread_mask_lock, flags);
869         if (!(card->thread_allowed_mask & thread) ||
870               (card->thread_start_mask & thread)) {
871                 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
872                 return -EPERM;
873         }
874         card->thread_start_mask |= thread;
875         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
876         return 0;
877 }
878
879 void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
880 {
881         unsigned long flags;
882
883         spin_lock_irqsave(&card->thread_mask_lock, flags);
884         card->thread_start_mask &= ~thread;
885         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
886         wake_up(&card->wait_q);
887 }
888 EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
889
890 void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
891 {
892         unsigned long flags;
893
894         spin_lock_irqsave(&card->thread_mask_lock, flags);
895         card->thread_running_mask &= ~thread;
896         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
897         wake_up_all(&card->wait_q);
898 }
899 EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
900
901 static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
902 {
903         unsigned long flags;
904         int rc = 0;
905
906         spin_lock_irqsave(&card->thread_mask_lock, flags);
907         if (card->thread_start_mask & thread) {
908                 if ((card->thread_allowed_mask & thread) &&
909                     !(card->thread_running_mask & thread)) {
910                         rc = 1;
911                         card->thread_start_mask &= ~thread;
912                         card->thread_running_mask |= thread;
913                 } else
914                         rc = -EPERM;
915         }
916         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
917         return rc;
918 }
919
920 int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
921 {
922         int rc = 0;
923
924         wait_event(card->wait_q,
925                    (rc = __qeth_do_run_thread(card, thread)) >= 0);
926         return rc;
927 }
928 EXPORT_SYMBOL_GPL(qeth_do_run_thread);
929
930 void qeth_schedule_recovery(struct qeth_card *card)
931 {
932         QETH_CARD_TEXT(card, 2, "startrec");
933         if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
934                 schedule_work(&card->kernel_thread_starter);
935 }
936 EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
937
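/* Evaluate channel status and sense data from an irb. A non-zero return
 * value indicates an error that the caller should recover from.
 */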
938 static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
939                             struct irb *irb)
940 {
941         int dstat, cstat;
942         char *sense;
943
944         sense = (char *) irb->ecw;
945         cstat = irb->scsw.cmd.cstat;
946         dstat = irb->scsw.cmd.dstat;
947
948         if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
949                      SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
950                      SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
951                 QETH_CARD_TEXT(card, 2, "CGENCHK");
952                 dev_warn(&cdev->dev, "The qeth device driver "
953                         "failed to recover an error on the device\n");
954                 QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
955                                  CCW_DEVID(cdev), dstat, cstat);
956                 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
957                                 16, 1, irb, 64, 1);
958                 return 1;
959         }
960
961         if (dstat & DEV_STAT_UNIT_CHECK) {
962                 if (sense[SENSE_RESETTING_EVENT_BYTE] &
963                     SENSE_RESETTING_EVENT_FLAG) {
964                         QETH_CARD_TEXT(card, 2, "REVIND");
965                         return 1;
966                 }
967                 if (sense[SENSE_COMMAND_REJECT_BYTE] &
968                     SENSE_COMMAND_REJECT_FLAG) {
969                         QETH_CARD_TEXT(card, 2, "CMDREJi");
970                         return 1;
971                 }
972                 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
973                         QETH_CARD_TEXT(card, 2, "AFFE");
974                         return 1;
975                 }
976                 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
977                         QETH_CARD_TEXT(card, 2, "ZEROSEN");
978                         return 0;
979                 }
980                 QETH_CARD_TEXT(card, 2, "DGENCHK");
981                 return 1;
982         }
983         return 0;
984 }
985
986 static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
987                                 unsigned long intparm, struct irb *irb)
988 {
989         if (!IS_ERR(irb))
990                 return 0;
991
992         switch (PTR_ERR(irb)) {
993         case -EIO:
994                 QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
995                                  CCW_DEVID(cdev));
996                 QETH_CARD_TEXT(card, 2, "ckirberr");
997                 QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
998                 return -EIO;
999         case -ETIMEDOUT:
1000                 dev_warn(&cdev->dev, "A hardware operation timed out"
1001                         " on the device\n");
1002                 QETH_CARD_TEXT(card, 2, "ckirberr");
1003                 QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
1004                 if (intparm == QETH_RCD_PARM) {
1005                         if (card->data.ccwdev == cdev) {
1006                                 card->data.state = CH_STATE_DOWN;
1007                                 wake_up(&card->wait_q);
1008                         }
1009                 }
1010                 return -ETIMEDOUT;
1011         default:
1012                 QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
1013                                  PTR_ERR(irb), CCW_DEVID(cdev));
1014                 QETH_CARD_TEXT(card, 2, "ckirberr");
1015                 QETH_CARD_TEXT(card, 2, "  rc???");
1016                 return PTR_ERR(irb);
1017         }
1018 }
1019
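/* Common interrupt handler for the read, write and data CCW channels. */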
1020 static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1021                 struct irb *irb)
1022 {
1023         int rc;
1024         int cstat, dstat;
1025         struct qeth_cmd_buffer *iob = NULL;
1026         struct ccwgroup_device *gdev;
1027         struct qeth_channel *channel;
1028         struct qeth_card *card;
1029
1030         /* while we hold the ccwdev lock, this stays valid: */
1031         gdev = dev_get_drvdata(&cdev->dev);
1032         card = dev_get_drvdata(&gdev->dev);
1033         if (!card)
1034                 return;
1035
1036         QETH_CARD_TEXT(card, 5, "irq");
1037
1038         if (card->read.ccwdev == cdev) {
1039                 channel = &card->read;
1040                 QETH_CARD_TEXT(card, 5, "read");
1041         } else if (card->write.ccwdev == cdev) {
1042                 channel = &card->write;
1043                 QETH_CARD_TEXT(card, 5, "write");
1044         } else {
1045                 channel = &card->data;
1046                 QETH_CARD_TEXT(card, 5, "data");
1047         }
1048
1049         if (qeth_intparm_is_iob(intparm))
1050                 iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
1051
1052         rc = qeth_check_irb_error(card, cdev, intparm, irb);
1053         if (rc) {
1054                 /* IO was terminated, free its resources. */
1055                 if (iob)
1056                         qeth_cancel_cmd(iob, rc);
1057                 atomic_set(&channel->irq_pending, 0);
1058                 wake_up(&card->wait_q);
1059                 return;
1060         }
1061
1062         atomic_set(&channel->irq_pending, 0);
1063
1064         if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
1065                 channel->state = CH_STATE_STOPPED;
1066
1067         if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
1068                 channel->state = CH_STATE_HALTED;
1069
1070         /* let's wake up immediately on data channel */
1071         if ((channel == &card->data) && (intparm != 0) &&
1072             (intparm != QETH_RCD_PARM))
1073                 goto out;
1074
1075         if (intparm == QETH_CLEAR_CHANNEL_PARM) {
1076                 QETH_CARD_TEXT(card, 6, "clrchpar");
1077                 /* we don't have to handle this further */
1078                 intparm = 0;
1079         }
1080         if (intparm == QETH_HALT_CHANNEL_PARM) {
1081                 QETH_CARD_TEXT(card, 6, "hltchpar");
1082                 /* we don't have to handle this further */
1083                 intparm = 0;
1084         }
1085
1086         cstat = irb->scsw.cmd.cstat;
1087         dstat = irb->scsw.cmd.dstat;
1088
1089         if ((dstat & DEV_STAT_UNIT_EXCEP) ||
1090             (dstat & DEV_STAT_UNIT_CHECK) ||
1091             (cstat)) {
1092                 if (irb->esw.esw0.erw.cons) {
1093                         dev_warn(&channel->ccwdev->dev,
1094                                 "The qeth device driver failed to recover "
1095                                 "an error on the device\n");
1096                         QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
1097                                          CCW_DEVID(channel->ccwdev), cstat,
1098                                          dstat);
1099                         print_hex_dump(KERN_WARNING, "qeth: irb ",
1100                                 DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
1101                         print_hex_dump(KERN_WARNING, "qeth: sense data ",
1102                                 DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
1103                 }
1104                 if (intparm == QETH_RCD_PARM) {
1105                         channel->state = CH_STATE_DOWN;
1106                         goto out;
1107                 }
1108                 rc = qeth_get_problem(card, cdev, irb);
1109                 if (rc) {
1110                         card->read_or_write_problem = 1;
1111                         if (iob)
1112                                 qeth_cancel_cmd(iob, rc);
1113                         qeth_clear_ipacmd_list(card);
1114                         qeth_schedule_recovery(card);
1115                         goto out;
1116                 }
1117         }
1118
1119         if (intparm == QETH_RCD_PARM) {
1120                 channel->state = CH_STATE_RCD_DONE;
1121                 goto out;
1122         }
1123         if (channel == &card->data)
1124                 return;
1125         if (channel == &card->read &&
1126             channel->state == CH_STATE_UP)
1127                 __qeth_issue_next_read(card);
1128
1129         if (iob && iob->callback)
1130                 iob->callback(card, iob->channel, iob);
1131
1132 out:
1133         wake_up(&card->wait_q);
1134         return;
1135 }
1136
1137 static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
1138                 struct qeth_qdio_out_buffer *buf,
1139                 enum iucv_tx_notify notification)
1140 {
1141         struct sk_buff *skb;
1142
1143         skb_queue_walk(&buf->skb_list, skb) {
1144                 QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
1145                 QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
1146                 if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
1147                         iucv_sk(skb->sk)->sk_txnotify(skb, notification);
1148         }
1149 }
1150
1151 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
1152 {
1153         struct sk_buff *skb;
1154
1155         /* release may never happen from within CQ tasklet scope */
1156         WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
1157
1158         if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
1159                 qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
1160
1161         while ((skb = __skb_dequeue(&buf->skb_list)) != NULL)
1162                 consume_skb(skb);
1163 }
1164
1165 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
1166                                      struct qeth_qdio_out_buffer *buf)
1167 {
1168         int i;
1169
1170         /* is PCI flag set on buffer? */
1171         if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
1172                 atomic_dec(&queue->set_pci_flags_count);
1173
1174         qeth_release_skbs(buf);
1175
1176         for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
1177                 if (buf->buffer->element[i].addr && buf->is_header[i])
1178                         kmem_cache_free(qeth_core_header_cache,
1179                                 buf->buffer->element[i].addr);
1180                 buf->is_header[i] = 0;
1181         }
1182
1183         qeth_scrub_qdio_buffer(buf->buffer,
1184                                QETH_MAX_BUFFER_ELEMENTS(queue->card));
1185         buf->next_element_to_fill = 0;
1186         atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
1187 }
1188
1189 static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
1190 {
1191         int j;
1192
1193         for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
1194                 if (!q->bufs[j])
1195                         continue;
1196                 qeth_cleanup_handled_pending(q, j, 1);
1197                 qeth_clear_output_buffer(q, q->bufs[j]);
1198                 if (free) {
1199                         kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
1200                         q->bufs[j] = NULL;
1201                 }
1202         }
1203 }
1204
1205 void qeth_clear_qdio_buffers(struct qeth_card *card)
1206 {
1207         int i;
1208
1209         QETH_CARD_TEXT(card, 2, "clearqdbf");
1210         /* clear outbound buffers to free skbs */
1211         for (i = 0; i < card->qdio.no_out_queues; ++i) {
1212                 if (card->qdio.out_qs[i]) {
1213                         qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
1214                 }
1215         }
1216 }
1217 EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
1218
1219 static void qeth_free_buffer_pool(struct qeth_card *card)
1220 {
1221         struct qeth_buffer_pool_entry *pool_entry, *tmp;
1222         int i = 0;
1223         list_for_each_entry_safe(pool_entry, tmp,
1224                                  &card->qdio.init_pool.entry_list, init_list) {
1225                 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
1226                         free_page((unsigned long)pool_entry->elements[i]);
1227                 list_del(&pool_entry->init_list);
1228                 kfree(pool_entry);
1229         }
1230 }
1231
1232 static void qeth_clean_channel(struct qeth_channel *channel)
1233 {
1234         struct ccw_device *cdev = channel->ccwdev;
1235         int cnt;
1236
1237         QETH_DBF_TEXT(SETUP, 2, "freech");
1238
1239         spin_lock_irq(get_ccwdev_lock(cdev));
1240         cdev->handler = NULL;
1241         spin_unlock_irq(get_ccwdev_lock(cdev));
1242
1243         for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
1244                 kfree(channel->iob[cnt].data);
1245         kfree(channel->ccw);
1246 }
1247
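/* Initialize a CCW channel: allocate its ccw, register the qeth interrupt
 * handler and, if requested, allocate the command buffers.
 */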
1248 static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
1249 {
1250         struct ccw_device *cdev = channel->ccwdev;
1251         int cnt;
1252
1253         QETH_DBF_TEXT(SETUP, 2, "setupch");
1254
1255         channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
1256         if (!channel->ccw)
1257                 return -ENOMEM;
1258         channel->state = CH_STATE_DOWN;
1259         atomic_set(&channel->irq_pending, 0);
1260         init_waitqueue_head(&channel->wait_q);
1261
1262         spin_lock_irq(get_ccwdev_lock(cdev));
1263         cdev->handler = qeth_irq;
1264         spin_unlock_irq(get_ccwdev_lock(cdev));
1265
1266         if (!alloc_buffers)
1267                 return 0;
1268
1269         for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
1270                 channel->iob[cnt].data = kmalloc(QETH_BUFSIZE,
1271                                                  GFP_KERNEL | GFP_DMA);
1272                 if (channel->iob[cnt].data == NULL)
1273                         break;
1274                 channel->iob[cnt].state = BUF_STATE_FREE;
1275                 channel->iob[cnt].channel = channel;
1276                 channel->iob[cnt].callback = qeth_send_control_data_cb;
1277         }
1278         if (cnt < QETH_CMD_BUFFER_NO) {
1279                 qeth_clean_channel(channel);
1280                 return -ENOMEM;
1281         }
1282         channel->io_buf_no = 0;
1283         spin_lock_init(&channel->iob_lock);
1284
1285         return 0;
1286 }
1287
1288 static void qeth_set_single_write_queues(struct qeth_card *card)
1289 {
1290         if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
1291             (card->qdio.no_out_queues == 4))
1292                 qeth_free_qdio_buffers(card);
1293
1294         card->qdio.no_out_queues = 1;
1295         if (card->qdio.default_out_queue != 0)
1296                 dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
1297
1298         card->qdio.default_out_queue = 0;
1299 }
1300
1301 static void qeth_set_multiple_write_queues(struct qeth_card *card)
1302 {
1303         if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
1304             (card->qdio.no_out_queues == 1)) {
1305                 qeth_free_qdio_buffers(card);
1306                 card->qdio.default_out_queue = 2;
1307         }
1308         card->qdio.no_out_queues = 4;
1309 }
1310
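/* Read the channel path descriptor of the data device to determine the
 * adapter's function level and the number of output queues to use.
 */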
1311 static void qeth_update_from_chp_desc(struct qeth_card *card)
1312 {
1313         struct ccw_device *ccwdev;
1314         struct channel_path_desc_fmt0 *chp_dsc;
1315
1316         QETH_DBF_TEXT(SETUP, 2, "chp_desc");
1317
1318         ccwdev = card->data.ccwdev;
1319         chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
1320         if (!chp_dsc)
1321                 goto out;
1322
1323         card->info.func_level = 0x4100 + chp_dsc->desc;
1324         if (card->info.type == QETH_CARD_TYPE_IQD)
1325                 goto out;
1326
1327         /* CHPP field bit 6 == 1 -> single queue */
1328         if ((chp_dsc->chpp & 0x02) == 0x02)
1329                 qeth_set_single_write_queues(card);
1330         else
1331                 qeth_set_multiple_write_queues(card);
1332 out:
1333         kfree(chp_dsc);
1334         QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
1335         QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
1336 }
1337
1338 static void qeth_init_qdio_info(struct qeth_card *card)
1339 {
1340         QETH_DBF_TEXT(SETUP, 4, "intqdinf");
1341         atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1342         card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1343         card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1344         card->qdio.no_out_queues = QETH_MAX_QUEUES;
1345
1346         /* inbound */
1347         card->qdio.no_in_queues = 1;
1348         card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1349         if (card->info.type == QETH_CARD_TYPE_IQD)
1350                 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
1351         else
1352                 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
1353         card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
1354         INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
1355         INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
1356 }
1357
1358 static void qeth_set_initial_options(struct qeth_card *card)
1359 {
1360         card->options.route4.type = NO_ROUTER;
1361         card->options.route6.type = NO_ROUTER;
1362         card->options.rx_sg_cb = QETH_RX_SG_CB;
1363         card->options.isolation = ISOLATION_MODE_NONE;
1364         card->options.cq = QETH_CQ_DISABLED;
1365         card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
1366 }
1367
1368 static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1369 {
1370         unsigned long flags;
1371         int rc = 0;
1372
1373         spin_lock_irqsave(&card->thread_mask_lock, flags);
1374         QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
1375                         (u8) card->thread_start_mask,
1376                         (u8) card->thread_allowed_mask,
1377                         (u8) card->thread_running_mask);
1378         rc = (card->thread_start_mask & thread);
1379         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1380         return rc;
1381 }
1382
1383 static void qeth_start_kernel_thread(struct work_struct *work)
1384 {
1385         struct task_struct *ts;
1386         struct qeth_card *card = container_of(work, struct qeth_card,
1387                                         kernel_thread_starter);
1388         QETH_CARD_TEXT(card, 2, "strthrd");
1389
1390         if (card->read.state != CH_STATE_UP &&
1391             card->write.state != CH_STATE_UP)
1392                 return;
1393         if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
1394                 ts = kthread_run(card->discipline->recover, (void *)card,
1395                                 "qeth_recover");
1396                 if (IS_ERR(ts)) {
1397                         qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1398                         qeth_clear_thread_running_bit(card,
1399                                 QETH_RECOVER_THREAD);
1400                 }
1401         }
1402 }
1403
1404 static void qeth_buffer_reclaim_work(struct work_struct *);
1405 static void qeth_setup_card(struct qeth_card *card)
1406 {
1407         QETH_DBF_TEXT(SETUP, 2, "setupcrd");
1408         QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1409
1410         card->info.type = CARD_RDEV(card)->id.driver_info;
1411         card->state = CARD_STATE_DOWN;
1412         spin_lock_init(&card->mclock);
1413         spin_lock_init(&card->lock);
1414         spin_lock_init(&card->ip_lock);
1415         spin_lock_init(&card->thread_mask_lock);
1416         mutex_init(&card->conf_mutex);
1417         mutex_init(&card->discipline_mutex);
1418         INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1419         INIT_LIST_HEAD(&card->cmd_waiter_list);
1420         init_waitqueue_head(&card->wait_q);
1421         qeth_set_initial_options(card);
1422         /* IP address takeover */
1423         INIT_LIST_HEAD(&card->ipato.entries);
1424         qeth_init_qdio_info(card);
1425         INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
1426         INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
1427 }
1428
1429 static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
1430 {
1431         struct qeth_card *card = container_of(slr, struct qeth_card,
1432                                         qeth_service_level);
1433         if (card->info.mcl_level[0])
1434                 seq_printf(m, "qeth: %s firmware level %s\n",
1435                         CARD_BUS_ID(card), card->info.mcl_level);
1436 }
1437
1438 static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
1439 {
1440         struct qeth_card *card;
1441
1442         QETH_DBF_TEXT(SETUP, 2, "alloccrd");
1443         card = kzalloc(sizeof(*card), GFP_KERNEL);
1444         if (!card)
1445                 goto out;
1446         QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1447
1448         card->gdev = gdev;
1449         dev_set_drvdata(&gdev->dev, card);
1450         CARD_RDEV(card) = gdev->cdev[0];
1451         CARD_WDEV(card) = gdev->cdev[1];
1452         CARD_DDEV(card) = gdev->cdev[2];
1453
1454         card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev));
1455         if (!card->event_wq)
1456                 goto out_wq;
1457         if (qeth_setup_channel(&card->read, true))
1458                 goto out_ip;
1459         if (qeth_setup_channel(&card->write, true))
1460                 goto out_channel;
1461         if (qeth_setup_channel(&card->data, false))
1462                 goto out_data;
1463         card->qeth_service_level.seq_print = qeth_core_sl_print;
1464         register_service_level(&card->qeth_service_level);
1465         return card;
1466
1467 out_data:
1468         qeth_clean_channel(&card->write);
1469 out_channel:
1470         qeth_clean_channel(&card->read);
1471 out_ip:
1472         destroy_workqueue(card->event_wq);
1473 out_wq:
1474         dev_set_drvdata(&gdev->dev, NULL);
1475         kfree(card);
1476 out:
1477         return NULL;
1478 }
1479
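/* Issue a CCW clear on the channel and wait for the interrupt handler to
 * report CH_STATE_STOPPED; on success the channel is marked as down.
 */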
1480 static int qeth_clear_channel(struct qeth_card *card,
1481                               struct qeth_channel *channel)
1482 {
1483         int rc;
1484
1485         QETH_CARD_TEXT(card, 3, "clearch");
1486         spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1487         rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
1488         spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1489
1490         if (rc)
1491                 return rc;
1492         rc = wait_event_interruptible_timeout(card->wait_q,
1493                         channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
1494         if (rc == -ERESTARTSYS)
1495                 return rc;
1496         if (channel->state != CH_STATE_STOPPED)
1497                 return -ETIME;
1498         channel->state = CH_STATE_DOWN;
1499         return 0;
1500 }
1501
1502 static int qeth_halt_channel(struct qeth_card *card,
1503                              struct qeth_channel *channel)
1504 {
1505         int rc;
1506
1507         QETH_CARD_TEXT(card, 3, "haltch");
1508         spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1509         rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
1510         spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1511
1512         if (rc)
1513                 return rc;
1514         rc = wait_event_interruptible_timeout(card->wait_q,
1515                         channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
1516         if (rc == -ERESTARTSYS)
1517                 return rc;
1518         if (channel->state != CH_STATE_HALTED)
1519                 return -ETIME;
1520         return 0;
1521 }
1522
1523 static int qeth_halt_channels(struct qeth_card *card)
1524 {
1525         int rc1 = 0, rc2 = 0, rc3 = 0;
1526
1527         QETH_CARD_TEXT(card, 3, "haltchs");
1528         rc1 = qeth_halt_channel(card, &card->read);
1529         rc2 = qeth_halt_channel(card, &card->write);
1530         rc3 = qeth_halt_channel(card, &card->data);
1531         if (rc1)
1532                 return rc1;
1533         if (rc2)
1534                 return rc2;
1535         return rc3;
1536 }
1537
1538 static int qeth_clear_channels(struct qeth_card *card)
1539 {
1540         int rc1 = 0, rc2 = 0, rc3 = 0;
1541
1542         QETH_CARD_TEXT(card, 3, "clearchs");
1543         rc1 = qeth_clear_channel(card, &card->read);
1544         rc2 = qeth_clear_channel(card, &card->write);
1545         rc3 = qeth_clear_channel(card, &card->data);
1546         if (rc1)
1547                 return rc1;
1548         if (rc2)
1549                 return rc2;
1550         return rc3;
1551 }
1552
1553 static int qeth_clear_halt_card(struct qeth_card *card, int halt)
1554 {
1555         int rc = 0;
1556
1557         QETH_CARD_TEXT(card, 3, "clhacrd");
1558
1559         if (halt)
1560                 rc = qeth_halt_channels(card);
1561         if (rc)
1562                 return rc;
1563         return qeth_clear_channels(card);
1564 }
1565
1566 int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
1567 {
1568         int rc = 0;
1569
1570         QETH_CARD_TEXT(card, 3, "qdioclr");
1571         switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
1572                 QETH_QDIO_CLEANING)) {
1573         case QETH_QDIO_ESTABLISHED:
1574                 if (card->info.type == QETH_CARD_TYPE_IQD)
1575                         rc = qdio_shutdown(CARD_DDEV(card),
1576                                 QDIO_FLAG_CLEANUP_USING_HALT);
1577                 else
1578                         rc = qdio_shutdown(CARD_DDEV(card),
1579                                 QDIO_FLAG_CLEANUP_USING_CLEAR);
1580                 if (rc)
1581                         QETH_CARD_TEXT_(card, 3, "1err%d", rc);
1582                 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
1583                 break;
1584         case QETH_QDIO_CLEANING:
1585                 return rc;
1586         default:
1587                 break;
1588         }
1589         rc = qeth_clear_halt_card(card, use_halt);
1590         if (rc)
1591                 QETH_CARD_TEXT_(card, 3, "2err%d", rc);
1592         card->state = CARD_STATE_DOWN;
1593         return rc;
1594 }
1595 EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
1596
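/* Read the device's configuration data record (RCD): look up the RCD
 * command in the extended SenseID data, run it on the data channel and,
 * on success, return the allocated buffer and its length to the caller.
 */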
1597 static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
1598                                int *length)
1599 {
1600         struct ciw *ciw;
1601         char *rcd_buf;
1602         int ret;
1603         struct qeth_channel *channel = &card->data;
1604
1605         /*
1606          * scan for RCD command in extended SenseID data
1607          */
1608         ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
1609         if (!ciw || ciw->cmd == 0)
1610                 return -EOPNOTSUPP;
1611         rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
1612         if (!rcd_buf)
1613                 return -ENOMEM;
1614
1615         qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf);
1616         channel->state = CH_STATE_RCD;
1617         spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1618         ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
1619                                        QETH_RCD_PARM, LPM_ANYPATH, 0,
1620                                        QETH_RCD_TIMEOUT);
1621         spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1622         if (!ret)
1623                 wait_event(card->wait_q,
1624                            (channel->state == CH_STATE_RCD_DONE ||
1625                             channel->state == CH_STATE_DOWN));
1626         if (channel->state == CH_STATE_DOWN)
1627                 ret = -EIO;
1628         else
1629                 channel->state = CH_STATE_DOWN;
1630         if (ret) {
1631                 kfree(rcd_buf);
1632                 *buffer = NULL;
1633                 *length = 0;
1634         } else {
1635                 *length = ciw->count;
1636                 *buffer = rcd_buf;
1637         }
1638         return ret;
1639 }
1640
1641 static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
1642 {
1643         QETH_DBF_TEXT(SETUP, 2, "cfgunit");
1644         card->info.chpid = prcd[30];
1645         card->info.unit_addr2 = prcd[31];
1646         card->info.cula = prcd[63];
1647         card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1648                                (prcd[0x11] == _ascebc['M']));
1649 }
1650
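/* For z/VM guest NICs, query the hypervisor via DIAG 0x26C (VNIC info) to
 * find out whether the virtual NIC operates in layer 2 or layer 3 mode, so
 * the matching discipline can be selected automatically.
 */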
1651 static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
1652 {
1653         enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1654         struct diag26c_vnic_resp *response = NULL;
1655         struct diag26c_vnic_req *request = NULL;
1656         struct ccw_dev_id id;
1657         char userid[80];
1658         int rc = 0;
1659
1660         QETH_DBF_TEXT(SETUP, 2, "vmlayer");
1661
1662         cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
1663         if (rc)
1664                 goto out;
1665
1666         request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
1667         response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
1668         if (!request || !response) {
1669                 rc = -ENOMEM;
1670                 goto out;
1671         }
1672
1673         ccw_device_get_id(CARD_RDEV(card), &id);
1674         request->resp_buf_len = sizeof(*response);
1675         request->resp_version = DIAG26C_VERSION6_VM65918;
1676         request->req_format = DIAG26C_VNIC_INFO;
1677         ASCEBC(userid, 8);
1678         memcpy(&request->sys_name, userid, 8);
1679         request->devno = id.devno;
1680
1681         QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1682         rc = diag26c(request, response, DIAG26C_PORT_VNIC);
1683         QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1684         if (rc)
1685                 goto out;
1686         QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
1687
1688         if (request->resp_buf_len < sizeof(*response) ||
1689             response->version != request->resp_version) {
1690                 rc = -EIO;
1691                 goto out;
1692         }
1693
1694         if (response->protocol == VNIC_INFO_PROT_L2)
1695                 disc = QETH_DISCIPLINE_LAYER2;
1696         else if (response->protocol == VNIC_INFO_PROT_L3)
1697                 disc = QETH_DISCIPLINE_LAYER3;
1698
1699 out:
1700         kfree(response);
1701         kfree(request);
1702         if (rc)
1703                 QETH_DBF_TEXT_(SETUP, 2, "err%x", rc);
1704         return disc;
1705 }
1706
1707 /* Determine whether the device requires a specific layer discipline */
1708 static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
1709 {
1710         enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1711
1712         if (card->info.type == QETH_CARD_TYPE_OSM ||
1713             card->info.type == QETH_CARD_TYPE_OSN)
1714                 disc = QETH_DISCIPLINE_LAYER2;
1715         else if (card->info.guestlan)
1716                 disc = (card->info.type == QETH_CARD_TYPE_IQD) ?
1717                                 QETH_DISCIPLINE_LAYER3 :
1718                                 qeth_vm_detect_layer(card);
1719
1720         switch (disc) {
1721         case QETH_DISCIPLINE_LAYER2:
1722                 QETH_DBF_TEXT(SETUP, 3, "force l2");
1723                 break;
1724         case QETH_DISCIPLINE_LAYER3:
1725                 QETH_DBF_TEXT(SETUP, 3, "force l3");
1726                 break;
1727         default:
1728                 QETH_DBF_TEXT(SETUP, 3, "force no");
1729         }
1730
1731         return disc;
1732 }
1733
1734 static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
1735 {
1736         QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
1737
1738         if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
1739             prcd[76] >= 0xF1 && prcd[76] <= 0xF4) {
1740                 card->info.blkt.time_total = 0;
1741                 card->info.blkt.inter_packet = 0;
1742                 card->info.blkt.inter_packet_jumbo = 0;
1743         } else {
1744                 card->info.blkt.time_total = 250;
1745                 card->info.blkt.inter_packet = 5;
1746                 card->info.blkt.inter_packet_jumbo = 15;
1747         }
1748 }
1749
1750 static void qeth_init_tokens(struct qeth_card *card)
1751 {
1752         card->token.issuer_rm_w = 0x00010103UL;
1753         card->token.cm_filter_w = 0x00010108UL;
1754         card->token.cm_connection_w = 0x0001010aUL;
1755         card->token.ulp_filter_w = 0x0001010bUL;
1756         card->token.ulp_connection_w = 0x0001010dUL;
1757 }
1758
1759 static void qeth_init_func_level(struct qeth_card *card)
1760 {
1761         switch (card->info.type) {
1762         case QETH_CARD_TYPE_IQD:
1763                 card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
1764                 break;
1765         case QETH_CARD_TYPE_OSD:
1766         case QETH_CARD_TYPE_OSN:
1767                 card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
1768                 break;
1769         default:
1770                 break;
1771         }
1772 }
1773
1774 static int qeth_idx_activate_get_answer(struct qeth_card *card,
1775                                         struct qeth_channel *channel,
1776                                         void (*reply_cb)(struct qeth_card *,
1777                                                          struct qeth_channel *,
1778                                                          struct qeth_cmd_buffer *))
1779 {
1780         struct qeth_cmd_buffer *iob;
1781         int rc;
1782
1783         QETH_DBF_TEXT(SETUP, 2, "idxanswr");
1784         iob = qeth_get_buffer(channel);
1785         if (!iob)
1786                 return -ENOMEM;
1787         iob->callback = reply_cb;
1788         qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
1789
1790         wait_event(card->wait_q,
1791                    atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1792         QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
1793         spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1794         rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
1795                                       (addr_t) iob, 0, 0, QETH_TIMEOUT);
1796         spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1797
1798         if (rc) {
1799                 QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
1800                 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
1801                 atomic_set(&channel->irq_pending, 0);
1802                 qeth_release_buffer(channel, iob);
1803                 wake_up(&card->wait_q);
1804                 return rc;
1805         }
1806         rc = wait_event_interruptible_timeout(card->wait_q,
1807                          channel->state == CH_STATE_UP, QETH_TIMEOUT);
1808         if (rc == -ERESTARTSYS)
1809                 return rc;
1810         if (channel->state != CH_STATE_UP) {
1811                 rc = -ETIME;
1812                 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
1813         } else
1814                 rc = 0;
1815         return rc;
1816 }
1817
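/* Build and transmit the IDX ACTIVATE request for a control channel, wait
 * until the IRQ handler moves the channel to ACTIVATING state and then read
 * back the card's answer.
 */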
1818 static int qeth_idx_activate_channel(struct qeth_card *card,
1819                                      struct qeth_channel *channel,
1820                                      void (*reply_cb)(struct qeth_card *,
1821                                                       struct qeth_channel *,
1822                                                       struct qeth_cmd_buffer *))
1823 {
1824         struct qeth_cmd_buffer *iob;
1825         __u16 temp;
1826         __u8 tmp;
1827         int rc;
1828         struct ccw_dev_id temp_devid;
1829
1830         QETH_DBF_TEXT(SETUP, 2, "idxactch");
1831
1832         iob = qeth_get_buffer(channel);
1833         if (!iob)
1834                 return -ENOMEM;
1835         iob->callback = reply_cb;
1836         qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE,
1837                        iob->data);
1838         if (channel == &card->write) {
1839                 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
1840                 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1841                        &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1842                 card->seqno.trans_hdr++;
1843         } else {
1844                 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
1845                 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1846                        &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1847         }
1848         tmp = ((u8)card->dev->dev_port) | 0x80;
1849         memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
1850         memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1851                &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
1852         memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
1853                &card->info.func_level, sizeof(__u16));
1854         ccw_device_get_id(CARD_DDEV(card), &temp_devid);
1855         memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
1856         temp = (card->info.cula << 8) + card->info.unit_addr2;
1857         memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
1858
1859         wait_event(card->wait_q,
1860                    atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1861         QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
1862         spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1863         rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
1864                                       (addr_t) iob, 0, 0, QETH_TIMEOUT);
1865         spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1866
1867         if (rc) {
1868                 QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
1869                         rc);
1870                 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
1871                 atomic_set(&channel->irq_pending, 0);
1872                 qeth_release_buffer(channel, iob);
1873                 wake_up(&card->wait_q);
1874                 return rc;
1875         }
1876         rc = wait_event_interruptible_timeout(card->wait_q,
1877                         channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
1878         if (rc == -ERESTARTSYS)
1879                 return rc;
1880         if (channel->state != CH_STATE_ACTIVATING) {
1881                 dev_warn(&channel->ccwdev->dev, "The qeth device driver"
1882                         " failed to recover an error on the device\n");
1883                 QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n",
1884                                  CCW_DEVID(channel->ccwdev));
1885                 QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
1886                 return -ETIME;
1887         }
1888         return qeth_idx_activate_get_answer(card, channel, reply_cb);
1889 }
1890
1891 static int qeth_peer_func_level(int level)
1892 {
1893         if ((level & 0xff) == 8)
1894                 return (level & 0xff) + 0x400;
1895         if (((level >> 8) & 3) == 1)
1896                 return (level & 0xff) + 0x200;
1897         return level;
1898 }
1899
1900 static void qeth_idx_write_cb(struct qeth_card *card,
1901                               struct qeth_channel *channel,
1902                               struct qeth_cmd_buffer *iob)
1903 {
1904         __u16 temp;
1905
1906         QETH_DBF_TEXT(SETUP, 2, "idxwrcb");
1907
1908         if (channel->state == CH_STATE_DOWN) {
1909                 channel->state = CH_STATE_ACTIVATING;
1910                 goto out;
1911         }
1912
1913         if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1914                 if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
1915                         dev_err(&channel->ccwdev->dev,
1916                                 "The adapter is used exclusively by another "
1917                                 "host\n");
1918                 else
1919                         QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
1920                                          CCW_DEVID(channel->ccwdev));
1921                 goto out;
1922         }
1923         memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1924         if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
1925                 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
1926                                  CCW_DEVID(channel->ccwdev),
1927                                  card->info.func_level, temp);
1928                 goto out;
1929         }
1930         channel->state = CH_STATE_UP;
1931 out:
1932         qeth_release_buffer(channel, iob);
1933 }
1934
1935 static void qeth_idx_read_cb(struct qeth_card *card,
1936                              struct qeth_channel *channel,
1937                              struct qeth_cmd_buffer *iob)
1938 {
1939         __u16 temp;
1940
1941         QETH_DBF_TEXT(SETUP, 2, "idxrdcb");
1942         if (channel->state == CH_STATE_DOWN) {
1943                 channel->state = CH_STATE_ACTIVATING;
1944                 goto out;
1945         }
1946
1947         if (qeth_check_idx_response(card, iob->data))
1948                 goto out;
1949
1950         if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
1951                 switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
1952                 case QETH_IDX_ACT_ERR_EXCL:
1953                         dev_err(&channel->ccwdev->dev,
1954                                 "The adapter is used exclusively by another "
1955                                 "host\n");
1956                         break;
1957                 case QETH_IDX_ACT_ERR_AUTH:
1958                 case QETH_IDX_ACT_ERR_AUTH_USER:
1959                         dev_err(&channel->ccwdev->dev,
1960                                 "Setting the device online failed because of "
1961                                 "insufficient authorization\n");
1962                         break;
1963                 default:
1964                         QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
1965                                          CCW_DEVID(channel->ccwdev));
1966                 }
1967                 QETH_CARD_TEXT_(card, 2, "idxread%c",
1968                         QETH_IDX_ACT_CAUSE_CODE(iob->data));
1969                 goto out;
1970         }
1971
1972         memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
1973         if (temp != qeth_peer_func_level(card->info.func_level)) {
1974                 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
1975                                  CCW_DEVID(channel->ccwdev),
1976                                  card->info.func_level, temp);
1977                 goto out;
1978         }
1979         memcpy(&card->token.issuer_rm_r,
1980                QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
1981                QETH_MPC_TOKEN_LENGTH);
1982         memcpy(&card->info.mcl_level[0],
1983                QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
1984         channel->state = CH_STATE_UP;
1985 out:
1986         qeth_release_buffer(channel, iob);
1987 }
1988
1989 void qeth_prepare_control_data(struct qeth_card *card, int len,
1990                 struct qeth_cmd_buffer *iob)
1991 {
1992         qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data);
1993         iob->callback = qeth_release_buffer_cb;
1994
1995         memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
1996                &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
1997         card->seqno.trans_hdr++;
1998         memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1999                &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
2000         card->seqno.pdu_hdr++;
2001         memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
2002                &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
2003         QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN));
2004 }
2005 EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
2006
2007 /**
2008  * qeth_send_control_data() -   send control command to the card
2009  * @card:                       qeth_card structure pointer
2010  * @len:                        size of the command buffer
2011  * @iob:                        qeth_cmd_buffer pointer
2012  * @reply_cb:                   callback function pointer
2013  * @cb_card:                    pointer to the qeth_card structure
2014  * @cb_reply:                   pointer to the qeth_reply structure
2015  * @cb_cmd:                     pointer to the original iob for non-IPA
2016  *                              commands, or to the qeth_ipa_cmd structure
2017  *                              for the IPA commands.
2018  * @reply_param:                private pointer passed to the callback
2019  *
2020  * Callback function gets called one or more times, with cb_cmd
2021  * pointing to the response returned by the hardware. Callback
2022  * function must return
2023  *   > 0 if more reply blocks are expected,
2024  *     0 if the last or only reply block is received, and
2025  *   < 0 on error.
2026  * Callback function can get the value of the reply_param pointer from the
2027  * field 'param' of the structure qeth_reply.
2028  */
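/*
 * Illustrative sketch only (not part of the driver): a minimal reply_cb
 * following the contract above; it returns 0 for the last (or only) reply
 * block and a negative errno on failure. The name sample_cb is hypothetical,
 * and casting cb_cmd to struct qeth_ipa_cmd assumes an IPA command was sent.
 *
 *	static int sample_cb(struct qeth_card *card, struct qeth_reply *reply,
 *			     unsigned long data)
 *	{
 *		struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
 *
 *		return cmd->hdr.return_code ? -EIO : 0;
 *	}
 */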
2029
2030 static int qeth_send_control_data(struct qeth_card *card, int len,
2031                                   struct qeth_cmd_buffer *iob,
2032                                   int (*reply_cb)(struct qeth_card *cb_card,
2033                                                   struct qeth_reply *cb_reply,
2034                                                   unsigned long cb_cmd),
2035                                   void *reply_param)
2036 {
2037         struct qeth_channel *channel = iob->channel;
2038         int rc;
2039         struct qeth_reply *reply = NULL;
2040         unsigned long timeout, event_timeout;
2041         struct qeth_ipa_cmd *cmd = NULL;
2042
2043         QETH_CARD_TEXT(card, 2, "sendctl");
2044
2045         if (card->read_or_write_problem) {
2046                 qeth_release_buffer(channel, iob);
2047                 return -EIO;
2048         }
2049         reply = qeth_alloc_reply(card);
2050         if (!reply) {
2051                 qeth_release_buffer(channel, iob);
2052                 return -ENOMEM;
2053         }
2054         reply->callback = reply_cb;
2055         reply->param = reply_param;
2056
2057         /* pairs with qeth_release_buffer(): */
2058         qeth_get_reply(reply);
2059         iob->reply = reply;
2060
2061         while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ;
2062
2063         if (IS_IPA(iob->data)) {
2064                 cmd = __ipa_cmd(iob);
2065                 cmd->hdr.seqno = card->seqno.ipa++;
2066                 reply->seqno = cmd->hdr.seqno;
2067                 event_timeout = QETH_IPA_TIMEOUT;
2068         } else {
2069                 reply->seqno = QETH_IDX_COMMAND_SEQNO;
2070                 event_timeout = QETH_TIMEOUT;
2071         }
2072         qeth_prepare_control_data(card, len, iob);
2073
2074         qeth_enqueue_reply(card, reply);
2075
2076         timeout = jiffies + event_timeout;
2077
2078         QETH_CARD_TEXT(card, 6, "noirqpnd");
2079         spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
2080         rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
2081                                       (addr_t) iob, 0, 0, event_timeout);
2082         spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
2083         if (rc) {
2084                 QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
2085                                  CARD_DEVID(card), rc);
2086                 QETH_CARD_TEXT_(card, 2, " err%d", rc);
2087                 qeth_dequeue_reply(card, reply);
2088                 qeth_put_reply(reply);
2089                 qeth_release_buffer(channel, iob);
2090                 atomic_set(&channel->irq_pending, 0);
2091                 wake_up(&card->wait_q);
2092                 return rc;
2093         }
2094
2095         /* We have only one long-running ipassist; since we can ensure
2096          * process context for this command, we can sleep. */
2097         if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
2098             cmd->hdr.prot_version == QETH_PROT_IPV4) {
2099                 if (!wait_event_timeout(reply->wait_q,
2100                     atomic_read(&reply->received), event_timeout))
2101                         goto time_err;
2102         } else {
2103                 while (!atomic_read(&reply->received)) {
2104                         if (time_after(jiffies, timeout))
2105                                 goto time_err;
2106                         cpu_relax();
2107                 }
2108         }
2109
2110         qeth_dequeue_reply(card, reply);
2111         rc = reply->rc;
2112         qeth_put_reply(reply);
2113         return rc;
2114
2115 time_err:
2116         qeth_dequeue_reply(card, reply);
2117         qeth_put_reply(reply);
2118         return -ETIME;
2119 }
2120
2121 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2122                 unsigned long data)
2123 {
2124         struct qeth_cmd_buffer *iob;
2125
2126         QETH_DBF_TEXT(SETUP, 2, "cmenblcb");
2127
2128         iob = (struct qeth_cmd_buffer *) data;
2129         memcpy(&card->token.cm_filter_r,
2130                QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2131                QETH_MPC_TOKEN_LENGTH);
2132         return 0;
2133 }
2134
2135 static int qeth_cm_enable(struct qeth_card *card)
2136 {
2137         int rc;
2138         struct qeth_cmd_buffer *iob;
2139
2140         QETH_DBF_TEXT(SETUP, 2, "cmenable");
2141
2142         iob = qeth_wait_for_buffer(&card->write);
2143         memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
2144         memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2145                &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2146         memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2147                &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2148
2149         rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
2150                                     qeth_cm_enable_cb, NULL);
2151         return rc;
2152 }
2153
2154 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2155                 unsigned long data)
2156 {
2157         struct qeth_cmd_buffer *iob;
2158
2159         QETH_DBF_TEXT(SETUP, 2, "cmsetpcb");
2160
2161         iob = (struct qeth_cmd_buffer *) data;
2162         memcpy(&card->token.cm_connection_r,
2163                QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2164                QETH_MPC_TOKEN_LENGTH);
2165         return 0;
2166 }
2167
2168 static int qeth_cm_setup(struct qeth_card *card)
2169 {
2170         int rc;
2171         struct qeth_cmd_buffer *iob;
2172
2173         QETH_DBF_TEXT(SETUP, 2, "cmsetup");
2174
2175         iob = qeth_wait_for_buffer(&card->write);
2176         memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
2177         memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2178                &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2179         memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2180                &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2181         memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2182                &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2183         rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
2184                                     qeth_cm_setup_cb, NULL);
2185         return rc;
2186 }
2187
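/* Apply the maximum MTU reported by the card: IQD devices have their RX
 * buffer size sized to it, while other card types fall back to Ethernet
 * defaults when no MTU has been configured yet.
 */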
2188 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
2189 {
2190         struct net_device *dev = card->dev;
2191         unsigned int new_mtu;
2192
2193         if (!max_mtu) {
2194                 /* IQD needs accurate max MTU to set up its RX buffers: */
2195                 if (IS_IQD(card))
2196                         return -EINVAL;
2197                 /* tolerate quirky HW: */
2198                 max_mtu = ETH_MAX_MTU;
2199         }
2200
2201         rtnl_lock();
2202         if (IS_IQD(card)) {
2203                 /* move any device with default MTU to new max MTU: */
2204                 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
2205
2206                 /* adjust RX buffer size to new max MTU: */
2207                 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
2208                 if (dev->max_mtu && dev->max_mtu != max_mtu)
2209                         qeth_free_qdio_buffers(card);
2210         } else {
2211                 if (dev->mtu)
2212                         new_mtu = dev->mtu;
2213                 /* default MTUs for first setup: */
2214                 else if (IS_LAYER2(card))
2215                         new_mtu = ETH_DATA_LEN;
2216                 else
2217                         new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
2218         }
2219
2220         dev->max_mtu = max_mtu;
2221         dev->mtu = min(new_mtu, max_mtu);
2222         rtnl_unlock();
2223         return 0;
2224 }
2225
2226 static int qeth_get_mtu_outof_framesize(int framesize)
2227 {
2228         switch (framesize) {
2229         case 0x4000:
2230                 return 8192;
2231         case 0x6000:
2232                 return 16384;
2233         case 0xa000:
2234                 return 32768;
2235         case 0xffff:
2236                 return 57344;
2237         default:
2238                 return 0;
2239         }
2240 }
2241
2242 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2243                 unsigned long data)
2244 {
2245         __u16 mtu, framesize;
2246         __u16 len;
2247         __u8 link_type;
2248         struct qeth_cmd_buffer *iob;
2249
2250         QETH_DBF_TEXT(SETUP, 2, "ulpenacb");
2251
2252         iob = (struct qeth_cmd_buffer *) data;
2253         memcpy(&card->token.ulp_filter_r,
2254                QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2255                QETH_MPC_TOKEN_LENGTH);
2256         if (card->info.type == QETH_CARD_TYPE_IQD) {
2257                 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2258                 mtu = qeth_get_mtu_outof_framesize(framesize);
2259         } else {
2260                 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
2261         }
2262         *(u16 *)reply->param = mtu;
2263
2264         memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2265         if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2266                 memcpy(&link_type,
2267                        QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2268                 card->info.link_type = link_type;
2269         } else
2270                 card->info.link_type = 0;
2271         QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
2272         return 0;
2273 }
2274
2275 static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2276 {
2277         if (IS_OSN(card))
2278                 return QETH_PROT_OSN2;
2279         return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
2280 }
2281
2282 static int qeth_ulp_enable(struct qeth_card *card)
2283 {
2284         u8 prot_type = qeth_mpc_select_prot_type(card);
2285         struct qeth_cmd_buffer *iob;
2286         u16 max_mtu;
2287         int rc;
2288
2289         /* FIXME: trace view callbacks */
2290         QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
2291
2292         iob = qeth_wait_for_buffer(&card->write);
2293         memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
2294
2295         *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
2296         memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2297         memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2298                &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2299         memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2300                &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2301         rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
2302                                     qeth_ulp_enable_cb, &max_mtu);
2303         if (rc)
2304                 return rc;
2305         return qeth_update_max_mtu(card, max_mtu);
2306 }
2307
2308 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2309                 unsigned long data)
2310 {
2311         struct qeth_cmd_buffer *iob;
2312
2313         QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
2314
2315         iob = (struct qeth_cmd_buffer *) data;
2316         memcpy(&card->token.ulp_connection_r,
2317                QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2318                QETH_MPC_TOKEN_LENGTH);
2319         if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2320                      3)) {
2321                 QETH_DBF_TEXT(SETUP, 2, "olmlimit");
2322                 dev_err(&card->gdev->dev, "A connection could not be "
2323                         "established because of an OLM limit\n");
2324                 return -EMLINK;
2325         }
2326         return 0;
2327 }
2328
2329 static int qeth_ulp_setup(struct qeth_card *card)
2330 {
2331         int rc;
2332         __u16 temp;
2333         struct qeth_cmd_buffer *iob;
2334         struct ccw_dev_id dev_id;
2335
2336         QETH_DBF_TEXT(SETUP, 2, "ulpsetup");
2337
2338         iob = qeth_wait_for_buffer(&card->write);
2339         memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
2340
2341         memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2342                &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2343         memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2344                &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2345         memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2346                &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2347
2348         ccw_device_get_id(CARD_DDEV(card), &dev_id);
2349         memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
2350         temp = (card->info.cula << 8) + card->info.unit_addr2;
2351         memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2352         rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
2353                                     qeth_ulp_setup_cb, NULL);
2354         return rc;
2355 }
2356
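/* Allocate one TX buffer descriptor from the slab cache and attach it to
 * slot @bidx of the output queue; any buffer previously in that slot stays
 * reachable via next_pending.
 */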
2357 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
2358 {
2359         struct qeth_qdio_out_buffer *newbuf;
2360
2361         newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
2362         if (!newbuf)
2363                 return -ENOMEM;
2364
2365         newbuf->buffer = q->qdio_bufs[bidx];
2366         skb_queue_head_init(&newbuf->skb_list);
2367         lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2368         newbuf->q = q;
2369         newbuf->next_pending = q->bufs[bidx];
2370         atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2371         q->bufs[bidx] = newbuf;
2372         return 0;
2373 }
2374
2375 static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2376 {
2377         if (!q)
2378                 return;
2379
2380         qeth_clear_outq_buffers(q, 1);
2381         qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2382         kfree(q);
2383 }
2384
2385 static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
2386 {
2387         struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2388
2389         if (!q)
2390                 return NULL;
2391
2392         if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
2393                 kfree(q);
2394                 return NULL;
2395         }
2396         return q;
2397 }
2398
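/* Allocate the inbound queue, the inbound buffer pool, the outbound queues
 * with their buffer descriptors and the completion queue; runs only once,
 * guarded by the qdio.state cmpxchg.
 */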
2399 static int qeth_alloc_qdio_buffers(struct qeth_card *card)
2400 {
2401         int i, j;
2402
2403         QETH_DBF_TEXT(SETUP, 2, "allcqdbf");
2404
2405         if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2406                 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2407                 return 0;
2408
2409         QETH_DBF_TEXT(SETUP, 2, "inq");
2410         card->qdio.in_q = qeth_alloc_qdio_queue();
2411         if (!card->qdio.in_q)
2412                 goto out_nomem;
2413
2414         /* inbound buffer pool */
2415         if (qeth_alloc_buffer_pool(card))
2416                 goto out_freeinq;
2417
2418         /* outbound */
2419         for (i = 0; i < card->qdio.no_out_queues; ++i) {
2420                 card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf();
2421                 if (!card->qdio.out_qs[i])
2422                         goto out_freeoutq;
2423                 QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
2424                 QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
2425                 card->qdio.out_qs[i]->queue_no = i;
2426                 /* give outbound qeth_qdio_buffers their qdio_buffers */
2427                 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2428                         WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
2429                         if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
2430                                 goto out_freeoutqbufs;
2431                 }
2432         }
2433
2434         /* completion */
2435         if (qeth_alloc_cq(card))
2436                 goto out_freeoutq;
2437
2438         return 0;
2439
2440 out_freeoutqbufs:
2441         while (j > 0) {
2442                 --j;
2443                 kmem_cache_free(qeth_qdio_outbuf_cache,
2444                                 card->qdio.out_qs[i]->bufs[j]);
2445                 card->qdio.out_qs[i]->bufs[j] = NULL;
2446         }
2447 out_freeoutq:
2448         while (i > 0) {
2449                 qeth_free_output_queue(card->qdio.out_qs[--i]);
2450                 card->qdio.out_qs[i] = NULL;
2451         }
2452         qeth_free_buffer_pool(card);
2453 out_freeinq:
2454         qeth_free_qdio_queue(card->qdio.in_q);
2455         card->qdio.in_q = NULL;
2456 out_nomem:
2457         atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2458         return -ENOMEM;
2459 }
2460
2461 static void qeth_free_qdio_buffers(struct qeth_card *card)
2462 {
2463         int i, j;
2464
2465         if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2466                 QETH_QDIO_UNINITIALIZED)
2467                 return;
2468
2469         qeth_free_cq(card);
2470         cancel_delayed_work_sync(&card->buffer_reclaim_work);
2471         for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2472                 if (card->qdio.in_q->bufs[j].rx_skb)
2473                         dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2474         }
2475         qeth_free_qdio_queue(card->qdio.in_q);
2476         card->qdio.in_q = NULL;
2477         /* inbound buffer pool */
2478         qeth_free_buffer_pool(card);
2479         /* free outbound qdio_qs */
2480         for (i = 0; i < card->qdio.no_out_queues; i++) {
2481                 qeth_free_output_queue(card->qdio.out_qs[i]);
2482                 card->qdio.out_qs[i] = NULL;
2483         }
2484 }
2485
2486 static void qeth_create_qib_param_field(struct qeth_card *card,
2487                 char *param_field)
2488 {
2489
2490         param_field[0] = _ascebc['P'];
2491         param_field[1] = _ascebc['C'];
2492         param_field[2] = _ascebc['I'];
2493         param_field[3] = _ascebc['T'];
2494         *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2495         *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2496         *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2497 }
2498
2499 static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2500                 char *param_field)
2501 {
2502         param_field[16] = _ascebc['B'];
2503         param_field[17] = _ascebc['L'];
2504         param_field[18] = _ascebc['K'];
2505         param_field[19] = _ascebc['T'];
2506         *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2507         *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2508         *((unsigned int *) (&param_field[28])) =
2509                 card->info.blkt.inter_packet_jumbo;
2510 }
2511
2512 static int qeth_qdio_activate(struct qeth_card *card)
2513 {
2514         QETH_DBF_TEXT(SETUP, 3, "qdioact");
2515         return qdio_activate(CARD_DDEV(card));
2516 }
2517
2518 static int qeth_dm_act(struct qeth_card *card)
2519 {
2520         int rc;
2521         struct qeth_cmd_buffer *iob;
2522
2523         QETH_DBF_TEXT(SETUP, 2, "dmact");
2524
2525         iob = qeth_wait_for_buffer(&card->write);
2526         memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
2527
2528         memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2529                &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2530         memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2531                &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2532         rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
2533         return rc;
2534 }
2535
2536 static int qeth_mpc_initialize(struct qeth_card *card)
2537 {
2538         int rc;
2539
2540         QETH_DBF_TEXT(SETUP, 2, "mpcinit");
2541
2542         rc = qeth_issue_next_read(card);
2543         if (rc) {
2544                 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2545                 return rc;
2546         }
2547         rc = qeth_cm_enable(card);
2548         if (rc) {
2549                 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
2550                 goto out_qdio;
2551         }
2552         rc = qeth_cm_setup(card);
2553         if (rc) {
2554                 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
2555                 goto out_qdio;
2556         }
2557         rc = qeth_ulp_enable(card);
2558         if (rc) {
2559                 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
2560                 goto out_qdio;
2561         }
2562         rc = qeth_ulp_setup(card);
2563         if (rc) {
2564                 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
2565                 goto out_qdio;
2566         }
2567         rc = qeth_alloc_qdio_buffers(card);
2568         if (rc) {
2569                 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
2570                 goto out_qdio;
2571         }
2572         rc = qeth_qdio_establish(card);
2573         if (rc) {
2574                 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
2575                 qeth_free_qdio_buffers(card);
2576                 goto out_qdio;
2577         }
2578         rc = qeth_qdio_activate(card);
2579         if (rc) {
2580                 QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
2581                 goto out_qdio;
2582         }
2583         rc = qeth_dm_act(card);
2584         if (rc) {
2585                 QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
2586                 goto out_qdio;
2587         }
2588
2589         return 0;
2590 out_qdio:
2591         qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
2592         qdio_free(CARD_DDEV(card));
2593         return rc;
2594 }
2595
2596 void qeth_print_status_message(struct qeth_card *card)
2597 {
2598         switch (card->info.type) {
2599         case QETH_CARD_TYPE_OSD:
2600         case QETH_CARD_TYPE_OSM:
2601         case QETH_CARD_TYPE_OSX:
2602                 /* VM uses a non-zero first character to indicate a
2603                  * HiperSockets-like reporting of the level; OSA sets
2604                  * the first character to zero.
2605                  */
2606                 if (!card->info.mcl_level[0]) {
2607                         sprintf(card->info.mcl_level, "%02x%02x",
2608                                 card->info.mcl_level[2],
2609                                 card->info.mcl_level[3]);
2610                         break;
2611                 }
2612                 /* fallthrough */
2613         case QETH_CARD_TYPE_IQD:
2614                 if ((card->info.guestlan) ||
2615                     (card->info.mcl_level[0] & 0x80)) {
2616                         card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2617                                 card->info.mcl_level[0]];
2618                         card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2619                                 card->info.mcl_level[1]];
2620                         card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2621                                 card->info.mcl_level[2]];
2622                         card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2623                                 card->info.mcl_level[3]];
2624                         card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2625                 }
2626                 break;
2627         default:
2628                 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2629         }
2630         dev_info(&card->gdev->dev,
2631                  "Device is a%s card%s%s%s\nwith link type %s.\n",
2632                  qeth_get_cardname(card),
2633                  (card->info.mcl_level[0]) ? " (level: " : "",
2634                  (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2635                  (card->info.mcl_level[0]) ? ")" : "",
2636                  qeth_get_cardname_short(card));
2637 }
2638 EXPORT_SYMBOL_GPL(qeth_print_status_message);
2639
2640 static void qeth_initialize_working_pool_list(struct qeth_card *card)
2641 {
2642         struct qeth_buffer_pool_entry *entry;
2643
2644         QETH_CARD_TEXT(card, 5, "inwrklst");
2645
2646         list_for_each_entry(entry,
2647                             &card->qdio.init_pool.entry_list, init_list) {
2648                 qeth_put_buffer_pool_entry(card, entry);
2649         }
2650 }
2651
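/* Pick an RX buffer pool entry whose pages are no longer referenced by the
 * stack; if none is free, recycle the first entry by swapping its
 * still-referenced pages for freshly allocated ones.
 */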
2652 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2653                                         struct qeth_card *card)
2654 {
2655         struct list_head *plh;
2656         struct qeth_buffer_pool_entry *entry;
2657         int i, free;
2658         struct page *page;
2659
2660         if (list_empty(&card->qdio.in_buf_pool.entry_list))
2661                 return NULL;
2662
2663         list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
2664                 entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
2665                 free = 1;
2666                 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2667                         if (page_count(virt_to_page(entry->elements[i])) > 1) {
2668                                 free = 0;
2669                                 break;
2670                         }
2671                 }
2672                 if (free) {
2673                         list_del_init(&entry->list);
2674                         return entry;
2675                 }
2676         }
2677
2678         /* no free buffer in pool so take first one and swap pages */
2679         entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2680                         struct qeth_buffer_pool_entry, list);
2681         for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2682                 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2683                         page = alloc_page(GFP_ATOMIC);
2684                         if (!page) {
2685                                 return NULL;
2686                         } else {
2687                                 free_page((unsigned long)entry->elements[i]);
2688                                 entry->elements[i] = page_address(page);
2689                                 QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2690                         }
2691                 }
2692         }
2693         list_del_init(&entry->list);
2694         return entry;
2695 }
2696
2697 static int qeth_init_input_buffer(struct qeth_card *card,
2698                 struct qeth_qdio_buffer *buf)
2699 {
2700         struct qeth_buffer_pool_entry *pool_entry;
2701         int i;
2702
2703         if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2704                 buf->rx_skb = netdev_alloc_skb(card->dev,
2705                                                QETH_RX_PULL_LEN + ETH_HLEN);
2706                 if (!buf->rx_skb)
2707                         return 1;
2708         }
2709
2710         pool_entry = qeth_find_free_buffer_pool_entry(card);
2711         if (!pool_entry)
2712                 return 1;
2713
2714         /*
2715          * Since the buffer is accessed only from the input_tasklet,
2716          * there shouldn't be a need to synchronize; also, since we use
2717          * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2718          * buffers.
2719          */
2720
2721         buf->pool_entry = pool_entry;
2722         for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2723                 buf->buffer->element[i].length = PAGE_SIZE;
2724                 buf->buffer->element[i].addr =  pool_entry->elements[i];
2725                 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2726                         buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2727                 else
2728                         buf->buffer->element[i].eflags = 0;
2729                 buf->buffer->element[i].sflags = 0;
2730         }
2731         return 0;
2732 }
2733
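/* (Re-)initialise the QDIO queues: refill the RX ring from the buffer pool
 * and hand the buffers to the hardware via do_QDIO(), set up the completion
 * queue and reset the bookkeeping of all TX queues.
 */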
2734 int qeth_init_qdio_queues(struct qeth_card *card)
2735 {
2736         int i, j;
2737         int rc;
2738
2739         QETH_DBF_TEXT(SETUP, 2, "initqdqs");
2740
2741         /* inbound queue */
2742         qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2743         memset(&card->rx, 0, sizeof(struct qeth_rx));
2744         qeth_initialize_working_pool_list(card);
2745         /* give only as many buffers to hardware as we have buffer pool entries */
2746         for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
2747                 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2748         card->qdio.in_q->next_buf_to_init =
2749                 card->qdio.in_buf_pool.buf_count - 1;
2750         rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
2751                      card->qdio.in_buf_pool.buf_count - 1);
2752         if (rc) {
2753                 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2754                 return rc;
2755         }
2756
2757         /* completion */
2758         rc = qeth_cq_init(card);
2759         if (rc) {
2760                 return rc;
2761         }
2762
2763         /* outbound queue */
2764         for (i = 0; i < card->qdio.no_out_queues; ++i) {
2765                 qdio_reset_buffers(card->qdio.out_qs[i]->qdio_bufs,
2766                                    QDIO_MAX_BUFFERS_PER_Q);
2767                 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2768                         qeth_clear_output_buffer(card->qdio.out_qs[i],
2769                                                  card->qdio.out_qs[i]->bufs[j]);
2770                 }
2771                 card->qdio.out_qs[i]->card = card;
2772                 card->qdio.out_qs[i]->next_buf_to_fill = 0;
2773                 card->qdio.out_qs[i]->do_pack = 0;
2774                 atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
2775                 atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
2776                 atomic_set(&card->qdio.out_qs[i]->state,
2777                            QETH_OUT_Q_UNLOCKED);
2778         }
2779         return 0;
2780 }
2781 EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
2782
2783 static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
2784 {
2785         switch (link_type) {
2786         case QETH_LINK_TYPE_HSTR:
2787                 return 2;
2788         default:
2789                 return 1;
2790         }
2791 }
2792
2793 static void qeth_fill_ipacmd_header(struct qeth_card *card,
2794                                     struct qeth_ipa_cmd *cmd,
2795                                     enum qeth_ipa_cmds command,
2796                                     enum qeth_prot_versions prot)
2797 {
2798         cmd->hdr.command = command;
2799         cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
2800         /* cmd->hdr.seqno is set by qeth_send_control_data() */
2801         cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
2802         cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port;
2803         cmd->hdr.prim_version_no = IS_LAYER2(card) ? 2 : 1;
2804         cmd->hdr.param_count = 1;
2805         cmd->hdr.prot_version = prot;
2806 }
2807
2808 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2809                           u16 cmd_length)
2810 {
2811         u16 total_length = IPA_PDU_HEADER_SIZE + cmd_length;
2812         u8 prot_type = qeth_mpc_select_prot_type(card);
2813
2814         memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2815         memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
2816         memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
2817         memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
2818         memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
2819         memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
2820                &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2821         memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
2822 }
2823 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
2824
2825 struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
2826                 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
2827 {
2828         struct qeth_cmd_buffer *iob;
2829
2830         iob = qeth_get_buffer(&card->write);
2831         if (iob) {
2832                 qeth_prepare_ipa_cmd(card, iob, sizeof(struct qeth_ipa_cmd));
2833                 qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot);
2834         } else {
2835                 dev_warn(&card->gdev->dev,
2836                          "The qeth driver ran out of channel command buffers\n");
2837                 QETH_DBF_MESSAGE(1, "device %x ran out of channel command buffers",
2838                                  CARD_DEVID(card));
2839         }
2840
2841         return iob;
2842 }
2843 EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
2844
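/* Default reply callback for IPA commands: map any non-zero IPA return code
 * to -EIO.
 */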
2845 static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
2846                                 struct qeth_reply *reply, unsigned long data)
2847 {
2848         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2849
2850         return (cmd->hdr.return_code) ? -EIO : 0;
2851 }
2852
2853 /**
2854  * qeth_send_ipa_cmd() - send an IPA command
2855  *
2856  * See qeth_send_control_data() for explanation of the arguments.
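 *
 * A typical caller first allocates the command buffer via
 * qeth_get_ipacmd_buffer() and then hands it off here, roughly (sketch
 * modelled on qeth_send_startlan() below; my_reply_cb stands in for any
 * caller-provided callback, or NULL to use the default handler):
 *
 *	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
 *	if (!iob)
 *		return -ENOMEM;
 *	return qeth_send_ipa_cmd(card, iob, my_reply_cb, NULL);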
2857  */
2858
2859 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2860                 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
2861                         unsigned long),
2862                 void *reply_param)
2863 {
2864         u16 length;
2865         int rc;
2866
2867         QETH_CARD_TEXT(card, 4, "sendipa");
2868
2869         if (reply_cb == NULL)
2870                 reply_cb = qeth_send_ipa_cmd_cb;
2871         memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2);
2872         rc = qeth_send_control_data(card, length, iob, reply_cb, reply_param);
2873         if (rc == -ETIME) {
2874                 qeth_clear_ipacmd_list(card);
2875                 qeth_schedule_recovery(card);
2876         }
2877         return rc;
2878 }
2879 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
2880
2881 static int qeth_send_startlan_cb(struct qeth_card *card,
2882                                  struct qeth_reply *reply, unsigned long data)
2883 {
2884         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2885
2886         if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
2887                 return -ENETDOWN;
2888
2889         return (cmd->hdr.return_code) ? -EIO : 0;
2890 }
2891
2892 static int qeth_send_startlan(struct qeth_card *card)
2893 {
2894         struct qeth_cmd_buffer *iob;
2895
2896         QETH_DBF_TEXT(SETUP, 2, "strtlan");
2897
2898         iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
2899         if (!iob)
2900                 return -ENOMEM;
2901         return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
2902 }
2903
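/* Fold the SETADAPTERPARMS sub-command's return code into the main IPA
 * header, so that callers only need to check a single return code.
 */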
2904 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
2905 {
2906         if (!cmd->hdr.return_code)
2907                 cmd->hdr.return_code =
2908                         cmd->data.setadapterparms.hdr.return_code;
2909         return cmd->hdr.return_code;
2910 }
2911
2912 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
2913                 struct qeth_reply *reply, unsigned long data)
2914 {
2915         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2916
2917         QETH_CARD_TEXT(card, 3, "quyadpcb");
2918         if (qeth_setadpparms_inspect_rc(cmd))
2919                 return -EIO;
2920
2921         if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
2922                 card->info.link_type =
2923                       cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
2924                 QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type);
2925         }
2926         card->options.adp.supported_funcs =
2927                 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
2928         return 0;
2929 }
2930
2931 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
2932                 __u32 command, __u32 cmdlen)
2933 {
2934         struct qeth_cmd_buffer *iob;
2935         struct qeth_ipa_cmd *cmd;
2936
2937         iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
2938                                      QETH_PROT_IPV4);
2939         if (iob) {
2940                 cmd = __ipa_cmd(iob);
2941                 cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
2942                 cmd->data.setadapterparms.hdr.command_code = command;
2943                 cmd->data.setadapterparms.hdr.used_total = 1;
2944                 cmd->data.setadapterparms.hdr.seq_no = 1;
2945         }
2946
2947         return iob;
2948 }
2949
2950 static int qeth_query_setadapterparms(struct qeth_card *card)
2951 {
2952         int rc;
2953         struct qeth_cmd_buffer *iob;
2954
2955         QETH_CARD_TEXT(card, 3, "queryadp");
2956         iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
2957                                    sizeof(struct qeth_ipacmd_setadpparms));
2958         if (!iob)
2959                 return -ENOMEM;
2960         rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
2961         return rc;
2962 }
2963
2964 static int qeth_query_ipassists_cb(struct qeth_card *card,
2965                 struct qeth_reply *reply, unsigned long data)
2966 {
2967         struct qeth_ipa_cmd *cmd;
2968
2969         QETH_DBF_TEXT(SETUP, 2, "qipasscb");
2970
2971         cmd = (struct qeth_ipa_cmd *) data;
2972
2973         switch (cmd->hdr.return_code) {
2974         case IPA_RC_SUCCESS:
2975                 break;
2976         case IPA_RC_NOTSUPP:
2977         case IPA_RC_L2_UNSUPPORTED_CMD:
2978                 QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
2979                 card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
2980                 card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
2981                 return -EOPNOTSUPP;
2982         default:
2983                 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
2984                                  CARD_DEVID(card), cmd->hdr.return_code);
2985                 return -EIO;
2986         }
2987
2988         if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
2989                 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
2990                 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
2991         } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
2992                 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
2993                 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
2994         } else
2995                 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
2996                                  CARD_DEVID(card));
2997         return 0;
2998 }
2999
3000 static int qeth_query_ipassists(struct qeth_card *card,
3001                                 enum qeth_prot_versions prot)
3002 {
3003         int rc;
3004         struct qeth_cmd_buffer *iob;
3005
3006         QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
3007         iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
3008         if (!iob)
3009                 return -ENOMEM;
3010         rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3011         return rc;
3012 }
3013
3014 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3015                                 struct qeth_reply *reply, unsigned long data)
3016 {
3017         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3018         struct qeth_query_switch_attributes *attrs;
3019         struct qeth_switch_info *sw_info;
3020
3021         QETH_CARD_TEXT(card, 2, "qswiatcb");
3022         if (qeth_setadpparms_inspect_rc(cmd))
3023                 return -EIO;
3024
3025         sw_info = (struct qeth_switch_info *)reply->param;
3026         attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3027         sw_info->capabilities = attrs->capabilities;
3028         sw_info->settings = attrs->settings;
3029         QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3030                         sw_info->settings);
3031         return 0;
3032 }
3033
3034 int qeth_query_switch_attributes(struct qeth_card *card,
3035                                  struct qeth_switch_info *sw_info)
3036 {
3037         struct qeth_cmd_buffer *iob;
3038
3039         QETH_CARD_TEXT(card, 2, "qswiattr");
3040         if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
3041                 return -EOPNOTSUPP;
3042         if (!netif_carrier_ok(card->dev))
3043                 return -ENOMEDIUM;
3044         iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
3045                                 sizeof(struct qeth_ipacmd_setadpparms_hdr));
3046         if (!iob)
3047                 return -ENOMEM;
3048         return qeth_send_ipa_cmd(card, iob,
3049                                 qeth_query_switch_attributes_cb, sw_info);
3050 }
3051
3052 static int qeth_query_setdiagass_cb(struct qeth_card *card,
3053                 struct qeth_reply *reply, unsigned long data)
3054 {
3055         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3056         u16 rc = cmd->hdr.return_code;
3057
3058         if (rc) {
3059                 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3060                 return -EIO;
3061         }
3062
3063         card->info.diagass_support = cmd->data.diagass.ext;
3064         return 0;
3065 }
3066
3067 static int qeth_query_setdiagass(struct qeth_card *card)
3068 {
3069         struct qeth_cmd_buffer *iob;
3070         struct qeth_ipa_cmd    *cmd;
3071
3072         QETH_DBF_TEXT(SETUP, 2, "qdiagass");
3073         iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
3074         if (!iob)
3075                 return -ENOMEM;
3076         cmd = __ipa_cmd(iob);
3077         cmd->data.diagass.subcmd_len = 16;
3078         cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
3079         return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
3080 }
3081
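/* Collect the identifying data for a hardware trap: CHPID, subchannel set and
 * device number of the read device, plus (via STSI) the LPAR number and the
 * VM guest name where available.
 */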
3082 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3083 {
3084         unsigned long info = get_zeroed_page(GFP_KERNEL);
3085         struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3086         struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3087         struct ccw_dev_id ccwid;
3088         int level;
3089
3090         tid->chpid = card->info.chpid;
3091         ccw_device_get_id(CARD_RDEV(card), &ccwid);
3092         tid->ssid = ccwid.ssid;
3093         tid->devno = ccwid.devno;
3094         if (!info)
3095                 return;
3096         level = stsi(NULL, 0, 0, 0);
3097         if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3098                 tid->lparnr = info222->lpar_number;
3099         if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3100                 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3101                 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3102         }
3103         free_page(info);
3104         return;
3105 }
3106
3107 static int qeth_hw_trap_cb(struct qeth_card *card,
3108                 struct qeth_reply *reply, unsigned long data)
3109 {
3110         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3111         u16 rc = cmd->hdr.return_code;
3112
3113         if (rc) {
3114                 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3115                 return -EIO;
3116         }
3117         return 0;
3118 }
3119
3120 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3121 {
3122         struct qeth_cmd_buffer *iob;
3123         struct qeth_ipa_cmd *cmd;
3124
3125         QETH_DBF_TEXT(SETUP, 2, "diagtrap");
3126         iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
3127         if (!iob)
3128                 return -ENOMEM;
3129         cmd = __ipa_cmd(iob);
3130         cmd->data.diagass.subcmd_len = 80;
3131         cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
3132         cmd->data.diagass.type = 1;
3133         cmd->data.diagass.action = action;
3134         switch (action) {
3135         case QETH_DIAGS_TRAP_ARM:
3136                 cmd->data.diagass.options = 0x0003;
3137                 cmd->data.diagass.ext = 0x00010000 +
3138                         sizeof(struct qeth_trap_id);
3139                 qeth_get_trap_id(card,
3140                         (struct qeth_trap_id *)cmd->data.diagass.cdata);
3141                 break;
3142         case QETH_DIAGS_TRAP_DISARM:
3143                 cmd->data.diagass.options = 0x0001;
3144                 break;
3145         case QETH_DIAGS_TRAP_CAPTURE:
3146                 break;
3147         }
3148         return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3149 }
3150 EXPORT_SYMBOL_GPL(qeth_hw_trap);
3151
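/* Log QDIO error details from the buffer's SBAL flags. Returns 1 for a real
 * error; an sflags value of 0x12 is only counted as an RX drop and, like the
 * no-error case, returns 0.
 */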
3152 static int qeth_check_qdio_errors(struct qeth_card *card,
3153                                   struct qdio_buffer *buf,
3154                                   unsigned int qdio_error,
3155                                   const char *dbftext)
3156 {
3157         if (qdio_error) {
3158                 QETH_CARD_TEXT(card, 2, dbftext);
3159                 QETH_CARD_TEXT_(card, 2, " F15=%02X",
3160                                buf->element[15].sflags);
3161                 QETH_CARD_TEXT_(card, 2, " F14=%02X",
3162                                buf->element[14].sflags);
3163                 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3164                 if (buf->element[15].sflags == 0x12) {
3165                         QETH_CARD_STAT_INC(card, rx_dropped);
3166                         return 0;
3167                 }
3168                 return 1;
3169         }
3170         return 0;
3171 }
3172
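/* Re-fill and requeue RX buffers on the input queue. Buffers are only given
 * back to the device once a threshold is reached, to save SIGAs. On
 * allocation failure we fall back to plain skb allocation, and when the
 * buffer pool is exhausted the reclaim worker is scheduled.
 */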
3173 static void qeth_queue_input_buffer(struct qeth_card *card, int index)
3174 {
3175         struct qeth_qdio_q *queue = card->qdio.in_q;
3176         struct list_head *lh;
3177         int count;
3178         int i;
3179         int rc;
3180         int newcount = 0;
3181
3182         count = (index < queue->next_buf_to_init) ?
3183                 card->qdio.in_buf_pool.buf_count -
3184                 (queue->next_buf_to_init - index) :
3185                 card->qdio.in_buf_pool.buf_count -
3186                 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
3187         /* only requeue at a certain threshold to avoid SIGAs */
3188         if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3189                 for (i = queue->next_buf_to_init;
3190                      i < queue->next_buf_to_init + count; ++i) {
3191                         if (qeth_init_input_buffer(card,
3192                                 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
3193                                 break;
3194                         } else {
3195                                 newcount++;
3196                         }
3197                 }
3198
3199                 if (newcount < count) {
3200                         /* we are short on memory, so we switch back to
3201                            traditional skb allocation and drop packets */
3202                         atomic_set(&card->force_alloc_skb, 3);
3203                         count = newcount;
3204                 } else {
3205                         atomic_add_unless(&card->force_alloc_skb, -1, 0);
3206                 }
3207
3208                 if (!count) {
3209                         i = 0;
3210                         list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3211                                 i++;
3212                         if (i == card->qdio.in_buf_pool.buf_count) {
3213                                 QETH_CARD_TEXT(card, 2, "qsarbw");
3214                                 card->reclaim_index = index;
3215                                 schedule_delayed_work(
3216                                         &card->buffer_reclaim_work,
3217                                         QETH_RECLAIM_WORK_TIME);
3218                         }
3219                         return;
3220                 }
3221
3222                 /*
3223                  * As in the previous implementation, avoid requeueing all
3224                  * 128 buffers at once, to benefit from PCI avoidance.
3225                  * This function keeps at least one buffer (the buffer at
3226                  * 'index') un-requeued -> this buffer is the first buffer that
3227                  * will be requeued the next time.
3228                  */
3229                 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3230                              queue->next_buf_to_init, count);
3231                 if (rc) {
3232                         QETH_CARD_TEXT(card, 2, "qinberr");
3233                 }
3234                 queue->next_buf_to_init = (queue->next_buf_to_init + count) %
3235                                           QDIO_MAX_BUFFERS_PER_Q;
3236         }
3237 }
3238
3239 static void qeth_buffer_reclaim_work(struct work_struct *work)
3240 {
3241         struct qeth_card *card = container_of(work, struct qeth_card,
3242                 buffer_reclaim_work.work);
3243
3244         QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
3245         qeth_queue_input_buffer(card, card->reclaim_index);
3246 }
3247
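/* Inspect a completed TX buffer for errors reported by the device and log
 * them; for HiperSockets (IQD) the SBAL flags alone decide whether the
 * transfer failed.
 */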
3248 static void qeth_handle_send_error(struct qeth_card *card,
3249                 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3250 {
3251         int sbalf15 = buffer->buffer->element[15].sflags;
3252
3253         QETH_CARD_TEXT(card, 6, "hdsnderr");
3254         if (card->info.type == QETH_CARD_TYPE_IQD) {
3255                 if (sbalf15 == 0) {
3256                         qdio_err = 0;
3257                 } else {
3258                         qdio_err = 1;
3259                 }
3260         }
3261         qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3262
3263         if (!qdio_err)
3264                 return;
3265
3266         if ((sbalf15 >= 15) && (sbalf15 <= 31))
3267                 return;
3268
3269         QETH_CARD_TEXT(card, 1, "lnkfail");
3270         QETH_CARD_TEXT_(card, 1, "%04x %02x",
3271                        (u16)qdio_err, (u8)sbalf15);
3272 }
3273
3274 /**
3275  * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3276  * @queue: queue to check for packing buffer
3277  *
3278  * Returns number of buffers that were prepared for flush.
3279  */
3280 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3281 {
3282         struct qeth_qdio_out_buffer *buffer;
3283
3284         buffer = queue->bufs[queue->next_buf_to_fill];
3285         if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3286             (buffer->next_element_to_fill > 0)) {
3287                 /* it's a packing buffer */
3288                 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3289                 queue->next_buf_to_fill =
3290                         (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
3291                 return 1;
3292         }
3293         return 0;
3294 }
3295
3296 /*
3297  * Switch to packing state if the number of used buffers on a queue
3298  * reaches a certain limit.
3299  */
3300 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3301 {
3302         if (!queue->do_pack) {
3303                 if (atomic_read(&queue->used_buffers)
3304                     >= QETH_HIGH_WATERMARK_PACK) {
3305                         /* switch non-PACKING -> PACKING */
3306                         QETH_CARD_TEXT(queue->card, 6, "np->pack");
3307                         QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3308                         queue->do_pack = 1;
3309                 }
3310         }
3311 }
3312
3313 /*
3314  * Switches from packing to non-packing mode. If there is a packing
3315  * buffer on the queue, this buffer will be prepared to be flushed.
3316  * In that case 1 is returned to inform the caller. If no buffer
3317  * has to be flushed, zero is returned.
3318  */
3319 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3320 {
3321         if (queue->do_pack) {
3322                 if (atomic_read(&queue->used_buffers)
3323                     <= QETH_LOW_WATERMARK_PACK) {
3324                         /* switch PACKING -> non-PACKING */
3325                         QETH_CARD_TEXT(queue->card, 6, "pack->np");
3326                         QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3327                         queue->do_pack = 0;
3328                         return qeth_prep_flush_pack_buffer(queue);
3329                 }
3330         }
3331         return 0;
3332 }
3333
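/* Hand 'count' filled buffers, starting at 'index', over to the device via
 * do_QDIO(). Sets the PCI request flag where needed so that a later TX
 * completion interrupt is guaranteed, and schedules recovery on SIGA errors
 * that are not just temporary.
 */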
3334 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3335                                int count)
3336 {
3337         struct qeth_qdio_out_buffer *buf;
3338         int rc;
3339         int i;
3340         unsigned int qdio_flags;
3341
3342         for (i = index; i < index + count; ++i) {
3343                 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3344                 buf = queue->bufs[bidx];
3345                 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3346                                 SBAL_EFLAGS_LAST_ENTRY;
3347
3348                 if (queue->bufstates)
3349                         queue->bufstates[bidx].user = buf;
3350
3351                 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
3352                         continue;
3353
3354                 if (!queue->do_pack) {
3355                         if ((atomic_read(&queue->used_buffers) >=
3356                                 (QETH_HIGH_WATERMARK_PACK -
3357                                  QETH_WATERMARK_PACK_FUZZ)) &&
3358                             !atomic_read(&queue->set_pci_flags_count)) {
3359                                 /* it's likely that we'll go to packing
3360                                  * mode soon */
3361                                 atomic_inc(&queue->set_pci_flags_count);
3362                                 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3363                         }
3364                 } else {
3365                         if (!atomic_read(&queue->set_pci_flags_count)) {
3366                                 /*
3367                                  * There is no outstanding PCI any more, so we
3368                                  * have to request a PCI to make sure that the
3369                                  * PCI will wake us at some point in the future.
3370                                  * Then we can flush packed buffers that might
3371                                  * still be hanging around, which can happen if
3372                                  * no further send was requested by the stack.
3373                                  */
3374                                 atomic_inc(&queue->set_pci_flags_count);
3375                                 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3376                         }
3377                 }
3378         }
3379
3380         QETH_TXQ_STAT_ADD(queue, bufs, count);
3381         netif_trans_update(queue->card->dev);
3382         qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3383         if (atomic_read(&queue->set_pci_flags_count))
3384                 qdio_flags |= QDIO_FLAG_PCI_OUT;
3385         atomic_add(count, &queue->used_buffers);
3386         rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
3387                      queue->queue_no, index, count);
3388         if (rc) {
3389                 QETH_TXQ_STAT_ADD(queue, tx_errors, count);
3390                 /* ignore temporary SIGA errors without busy condition */
3391                 if (rc == -ENOBUFS)
3392                         return;
3393                 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3394                 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3395                 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3396                 QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3397                 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3398
3399                 /* This must not happen under normal circumstances. If it
3400                  * does, something is really wrong -> recover. */
3401                 qeth_schedule_recovery(queue->card);
3402                 return;
3403         }
3404 }
3405
3406 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3407 {
3408         int index;
3409         int flush_cnt = 0;
3410         int q_was_packing = 0;
3411
3412         /*
3413          * check if we have to switch to non-packing mode or if
3414          * we have to get a pci flag out on the queue
3415          */
3416         if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3417             !atomic_read(&queue->set_pci_flags_count)) {
3418                 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
3419                                 QETH_OUT_Q_UNLOCKED) {
3420                         /*
3421                          * If we get in here, there was no action in
3422                          * do_send_packet. So, we check if there is a
3423                          * packing buffer to be flushed here.
3424                          */
3425                         netif_stop_queue(queue->card->dev);
3426                         index = queue->next_buf_to_fill;
3427                         q_was_packing = queue->do_pack;
3428                         /* queue->do_pack may change */
3429                         barrier();
3430                         flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
3431                         if (!flush_cnt &&
3432                             !atomic_read(&queue->set_pci_flags_count))
3433                                 flush_cnt += qeth_prep_flush_pack_buffer(queue);
3434                         if (q_was_packing)
3435                                 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3436                         if (flush_cnt)
3437                                 qeth_flush_buffers(queue, index, flush_cnt);
3438                         atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3439                 }
3440         }
3441 }
3442
3443 static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
3444                                  unsigned long card_ptr)
3445 {
3446         struct qeth_card *card = (struct qeth_card *)card_ptr;
3447
3448         if (card->dev->flags & IFF_UP)
3449                 napi_schedule(&card->napi);
3450 }
3451
3452 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3453 {
3454         int rc;
3455
3456         if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
3457                 rc = -1;
3458                 goto out;
3459         } else {
3460                 if (card->options.cq == cq) {
3461                         rc = 0;
3462                         goto out;
3463                 }
3464
3465                 if (card->state != CARD_STATE_DOWN) {
3466                         rc = -1;
3467                         goto out;
3468                 }
3469
3470                 qeth_free_qdio_buffers(card);
3471                 card->options.cq = cq;
3472                 rc = 0;
3473         }
3474 out:
3475         return rc;
3476
3477 }
3478 EXPORT_SYMBOL_GPL(qeth_configure_cq);
3479
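/* Handler for the Completion Queue: walk each completed buffer's elements,
 * process the queued AOBs and give the buffers back to the device.
 */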
3480 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3481                                  unsigned int queue, int first_element,
3482                                  int count)
3483 {
3484         struct qeth_qdio_q *cq = card->qdio.c_q;
3485         int i;
3486         int rc;
3487
3488         if (!qeth_is_cq(card, queue))
3489                 return;
3490
3491         QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3492         QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3493         QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3494
3495         if (qdio_err) {
3496                 netif_stop_queue(card->dev);
3497                 qeth_schedule_recovery(card);
3498                 return;
3499         }
3500
3501         for (i = first_element; i < first_element + count; ++i) {
3502                 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3503                 struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
3504                 int e = 0;
3505
3506                 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3507                        buffer->element[e].addr) {
3508                         unsigned long phys_aob_addr;
3509
3510                         phys_aob_addr = (unsigned long) buffer->element[e].addr;
3511                         qeth_qdio_handle_aob(card, phys_aob_addr);
3512                         ++e;
3513                 }
3514                 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3515         }
3516         rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3517                     card->qdio.c_q->next_buf_to_init,
3518                     count);
3519         if (rc) {
3520                 dev_warn(&card->gdev->dev,
3521                         "QDIO reported an error, rc=%i\n", rc);
3522                 QETH_CARD_TEXT(card, 2, "qcqherr");
3523         }
3524         card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
3525                                    + count) % QDIO_MAX_BUFFERS_PER_Q;
3526 }
3527
3528 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3529                                     unsigned int qdio_err, int queue,
3530                                     int first_elem, int count,
3531                                     unsigned long card_ptr)
3532 {
3533         struct qeth_card *card = (struct qeth_card *)card_ptr;
3534
3535         QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3536         QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3537
3538         if (qeth_is_cq(card, queue))
3539                 qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
3540         else if (qdio_err)
3541                 qeth_schedule_recovery(card);
3542 }
3543
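/* TX completion handler: check each completed buffer for errors, handle
 * buffers that the device left in a pending state (CQ mode), release the
 * transmitted skbs and wake the transmit queue again.
 */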
3544 static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3545                                      unsigned int qdio_error, int __queue,
3546                                      int first_element, int count,
3547                                      unsigned long card_ptr)
3548 {
3549         struct qeth_card *card        = (struct qeth_card *) card_ptr;
3550         struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3551         struct qeth_qdio_out_buffer *buffer;
3552         int i;
3553
3554         QETH_CARD_TEXT(card, 6, "qdouhdl");
3555         if (qdio_error & QDIO_ERROR_FATAL) {
3556                 QETH_CARD_TEXT(card, 2, "achkcond");
3557                 netif_stop_queue(card->dev);
3558                 qeth_schedule_recovery(card);
3559                 return;
3560         }
3561
3562         for (i = first_element; i < (first_element + count); ++i) {
3563                 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3564                 buffer = queue->bufs[bidx];
3565                 qeth_handle_send_error(card, buffer, qdio_error);
3566
3567                 if (queue->bufstates &&
3568                     (queue->bufstates[bidx].flags &
3569                      QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
3570                         WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
3571
3572                         if (atomic_cmpxchg(&buffer->state,
3573                                            QETH_QDIO_BUF_PRIMED,
3574                                            QETH_QDIO_BUF_PENDING) ==
3575                                 QETH_QDIO_BUF_PRIMED) {
3576                                 qeth_notify_skbs(queue, buffer,
3577                                                  TX_NOTIFY_PENDING);
3578                         }
3579                         QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
3580
3581                         /* prepare the queue slot for re-use: */
3582                         qeth_scrub_qdio_buffer(buffer->buffer,
3583                                                QETH_MAX_BUFFER_ELEMENTS(card));
3584                         if (qeth_init_qdio_out_buf(queue, bidx)) {
3585                                 QETH_CARD_TEXT(card, 2, "outofbuf");
3586                                 qeth_schedule_recovery(card);
3587                         }
3588                 } else {
3589                         if (card->options.cq == QETH_CQ_ENABLED) {
3590                                 enum iucv_tx_notify n;
3591
3592                                 n = qeth_compute_cq_notification(
3593                                         buffer->buffer->element[15].sflags, 0);
3594                                 qeth_notify_skbs(queue, buffer, n);
3595                         }
3596
3597                         qeth_clear_output_buffer(queue, buffer);
3598                 }
3599                 qeth_cleanup_handled_pending(queue, bidx, 0);
3600         }
3601         atomic_sub(count, &queue->used_buffers);
3602         /* check if we need to do something on this outbound queue */
3603         if (card->info.type != QETH_CARD_TYPE_IQD)
3604                 qeth_check_outbound_queue(queue);
3605
3606         netif_wake_queue(queue->card->dev);
3607 }
3608
3609 /* We cannot use outbound queue 3 for unicast packets on HiperSockets */
3610 static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
3611 {
3612         if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3))
3613                 return 2;
3614         return queue_num;
3615 }
3616
3617 /*
3618  * Note: Function assumes that we have 4 outbound queues.
3619  */
3620 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3621                             int ipv)
3622 {
3623         __be16 *tci;
3624         u8 tos;
3625
3626         switch (card->qdio.do_prio_queueing) {
3627         case QETH_PRIO_Q_ING_TOS:
3628         case QETH_PRIO_Q_ING_PREC:
3629                 switch (ipv) {
3630                 case 4:
3631                         tos = ipv4_get_dsfield(ip_hdr(skb));
3632                         break;
3633                 case 6:
3634                         tos = ipv6_get_dsfield(ipv6_hdr(skb));
3635                         break;
3636                 default:
3637                         return card->qdio.default_out_queue;
3638                 }
3639                 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3640                         return qeth_cut_iqd_prio(card, ~tos >> 6 & 3);
3641                 if (tos & IPTOS_MINCOST)
3642                         return qeth_cut_iqd_prio(card, 3);
3643                 if (tos & IPTOS_RELIABILITY)
3644                         return 2;
3645                 if (tos & IPTOS_THROUGHPUT)
3646                         return 1;
3647                 if (tos & IPTOS_LOWDELAY)
3648                         return 0;
3649                 break;
3650         case QETH_PRIO_Q_ING_SKB:
3651                 if (skb->priority > 5)
3652                         return 0;
3653                 return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3);
3654         case QETH_PRIO_Q_ING_VLAN:
3655                 tci = &((struct ethhdr *)skb->data)->h_proto;
3656                 if (be16_to_cpu(*tci) == ETH_P_8021Q)
3657                         return qeth_cut_iqd_prio(card,
3658                         ~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3);
3659                 break;
3660         default:
3661                 break;
3662         }
3663         return card->qdio.default_out_queue;
3664 }
3665 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3666
3667 /**
3668  * qeth_get_elements_for_frags() -      find number of SBALEs for skb frags.
3669  * @skb:                                SKB address
3670  *
3671  * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3672  * the fragmented part of the SKB. Returns zero for a linear SKB.
3673  */
3674 static int qeth_get_elements_for_frags(struct sk_buff *skb)
3675 {
3676         int cnt, elements = 0;
3677
3678         for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3679                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt];
3680
3681                 elements += qeth_get_elements_for_range(
3682                         (addr_t)skb_frag_address(frag),
3683                         (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3684         }
3685         return elements;
3686 }
3687
3688 /**
3689  * qeth_count_elements() -      Counts the number of QDIO buffer elements needed
3690  *                              to transmit an skb.
3691  * @skb:                        the skb to operate on.
3692  * @data_offset:                skip this part of the skb's linear data
3693  *
3694  * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3695  * skb's data (both its linear part and paged fragments).
3696  */
3697 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
3698 {
3699         unsigned int elements = qeth_get_elements_for_frags(skb);
3700         addr_t end = (addr_t)skb->data + skb_headlen(skb);
3701         addr_t start = (addr_t)skb->data + data_offset;
3702
3703         if (start != end)
3704                 elements += qeth_get_elements_for_range(start, end);
3705         return elements;
3706 }
3707 EXPORT_SYMBOL_GPL(qeth_count_elements);
3708
3709 #define QETH_HDR_CACHE_OBJ_SIZE         (sizeof(struct qeth_hdr_tso) + \
3710                                          MAX_TCP_HEADER)
3711
3712 /**
3713  * qeth_add_hw_header() - add a HW header to an skb.
3714  * @skb: skb that the HW header should be added to.
3715  * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3716  *       it contains a valid pointer to a qeth_hdr.
3717  * @hdr_len: length of the HW header.
3718  * @proto_len: length of protocol headers that need to be in the same page as
3719  *             the HW header.
3720  *
3721  * Returns the pushed length. If the header can't be pushed onto the skb
3722  * (e.g. because it would cross a page boundary), it is allocated from
3723  * the cache instead and 0 is returned.
3724  * The number of needed buffer elements is returned in @elements.
3725  * A negative return value indicates an error while creating the header.
3726  */
3727 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
3728                               struct sk_buff *skb, struct qeth_hdr **hdr,
3729                               unsigned int hdr_len, unsigned int proto_len,
3730                               unsigned int *elements)
3731 {
3732         const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(queue->card);
3733         const unsigned int contiguous = proto_len ? proto_len : 1;
3734         unsigned int __elements;
3735         addr_t start, end;
3736         bool push_ok;
3737         int rc;
3738
3739 check_layout:
3740         start = (addr_t)skb->data - hdr_len;
3741         end = (addr_t)skb->data;
3742
3743         if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3744                 /* Push HW header into same page as first protocol header. */
3745                 push_ok = true;
3746                 /* ... but TSO always needs a separate element for headers: */
3747                 if (skb_is_gso(skb))
3748                         __elements = 1 + qeth_count_elements(skb, proto_len);
3749                 else
3750                         __elements = qeth_count_elements(skb, 0);
3751         } else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) {
3752                 /* Push HW header into a new page. */
3753                 push_ok = true;
3754                 __elements = 1 + qeth_count_elements(skb, 0);
3755         } else {
3756                 /* Use header cache, copy protocol headers up. */
3757                 push_ok = false;
3758                 __elements = 1 + qeth_count_elements(skb, proto_len);
3759         }
3760
3761         /* Compress skb to fit into one IO buffer: */
3762         if (__elements > max_elements) {
3763                 if (!skb_is_nonlinear(skb)) {
3764                         /* Drop it, no easy way of shrinking it further. */
3765                         QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
3766                                          max_elements, __elements, skb->len);
3767                         return -E2BIG;
3768                 }
3769
3770                 rc = skb_linearize(skb);
3771                 if (rc) {
3772                         QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
3773                         return rc;
3774                 }
3775
3776                 QETH_TXQ_STAT_INC(queue, skbs_linearized);
3777                 /* Linearization changed the layout, re-evaluate: */
3778                 goto check_layout;
3779         }
3780
3781         *elements = __elements;
3782         /* Add the header: */
3783         if (push_ok) {
3784                 *hdr = skb_push(skb, hdr_len);
3785                 return hdr_len;
3786         }
3787         /* Fall back to allocating the header from the cache: */
3788         if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
3789                 return -E2BIG;
3790         *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
3791         if (!*hdr)
3792                 return -ENOMEM;
3793         /* Copy protocol headers behind HW header: */
3794         skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
3795         return 0;
3796 }
3797
3798 static void __qeth_fill_buffer(struct sk_buff *skb,
3799                                struct qeth_qdio_out_buffer *buf,
3800                                bool is_first_elem, unsigned int offset)
3801 {
3802         struct qdio_buffer *buffer = buf->buffer;
3803         int element = buf->next_element_to_fill;
3804         int length = skb_headlen(skb) - offset;
3805         char *data = skb->data + offset;
3806         int length_here, cnt;
3807
3808         /* map linear part into buffer element(s) */
3809         while (length > 0) {
3810                 /* length_here is the remaining amount of data in this page */
3811                 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
3812                 if (length < length_here)
3813                         length_here = length;
3814
3815                 buffer->element[element].addr = data;
3816                 buffer->element[element].length = length_here;
3817                 length -= length_here;
3818                 if (is_first_elem) {
3819                         is_first_elem = false;
3820                         if (length || skb_is_nonlinear(skb))
3821                                 /* skb needs additional elements */
3822                                 buffer->element[element].eflags =
3823                                         SBAL_EFLAGS_FIRST_FRAG;
3824                         else
3825                                 buffer->element[element].eflags = 0;
3826                 } else {
3827                         buffer->element[element].eflags =
3828                                 SBAL_EFLAGS_MIDDLE_FRAG;
3829                 }
3830                 data += length_here;
3831                 element++;
3832         }
3833
3834         /* map page frags into buffer element(s) */
3835         for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3836                 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3837
3838                 data = skb_frag_address(frag);
3839                 length = skb_frag_size(frag);
3840                 while (length > 0) {
3841                         length_here = PAGE_SIZE -
3842                                 ((unsigned long) data % PAGE_SIZE);
3843                         if (length < length_here)
3844                                 length_here = length;
3845
3846                         buffer->element[element].addr = data;
3847                         buffer->element[element].length = length_here;
3848                         buffer->element[element].eflags =
3849                                 SBAL_EFLAGS_MIDDLE_FRAG;
3850                         length -= length_here;
3851                         data += length_here;
3852                         element++;
3853                 }
3854         }
3855
3856         if (buffer->element[element - 1].eflags)
3857                 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
3858         buf->next_element_to_fill = element;
3859 }
3860
3861 /**
3862  * qeth_fill_buffer() - map skb into an output buffer
3863  * @queue:      QDIO queue to submit the buffer on
3864  * @buf:        buffer to transport the skb
3865  * @skb:        skb to map into the buffer
3866  * @hdr:        qeth_hdr for this skb. Either at skb->data, or allocated
3867  *              from qeth_core_header_cache.
3868  * @offset:     when mapping the skb, start at skb->data + offset
3869  * @hd_len:     if > 0, build a dedicated header element of this size
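 *
 * Returns 0 if the buffer can still take more data (packing mode), or 1 if
 * the buffer was primed and the queue's fill index was advanced.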
3870  */
3871 static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3872                             struct qeth_qdio_out_buffer *buf,
3873                             struct sk_buff *skb, struct qeth_hdr *hdr,
3874                             unsigned int offset, unsigned int hd_len)
3875 {
3876         struct qdio_buffer *buffer = buf->buffer;
3877         bool is_first_elem = true;
3878
3879         __skb_queue_tail(&buf->skb_list, skb);
3880
3881         /* build dedicated header element */
3882         if (hd_len) {
3883                 int element = buf->next_element_to_fill;
3884                 is_first_elem = false;
3885
3886                 buffer->element[element].addr = hdr;
3887                 buffer->element[element].length = hd_len;
3888                 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3889                 /* remember to free cache-allocated qeth_hdr: */
3890                 buf->is_header[element] = ((void *)hdr != skb->data);
3891                 buf->next_element_to_fill++;
3892         }
3893
3894         __qeth_fill_buffer(skb, buf, is_first_elem, offset);
3895
3896         if (!queue->do_pack) {
3897                 QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
3898         } else {
3899                 QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
3900
3901                 QETH_TXQ_STAT_INC(queue, skbs_pack);
3902                 /* If the buffer still has free elements, keep using it. */
3903                 if (buf->next_element_to_fill <
3904                     QETH_MAX_BUFFER_ELEMENTS(queue->card))
3905                         return 0;
3906         }
3907
3908         /* flush out the buffer */
3909         atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
3910         queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
3911                                   QDIO_MAX_BUFFERS_PER_Q;
3912         return 1;
3913 }
3914
3915 static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
3916                                     struct sk_buff *skb, struct qeth_hdr *hdr,
3917                                     unsigned int offset, unsigned int hd_len)
3918 {
3919         int index = queue->next_buf_to_fill;
3920         struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
3921
3922         /*
3923          * check if buffer is empty to make sure that we do not 'overtake'
3924          * ourselves and try to fill a buffer that is already primed
3925          */
3926         if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
3927                 return -EBUSY;
3928         qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
3929         qeth_flush_buffers(queue, index, 1);
3930         return 0;
3931 }
3932
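/* Queue an skb on an output queue, packing multiple skbs into one buffer when
 * the queue is in packing mode, and flush primed buffers to the device.
 * Returns 0 on success or -EBUSY if no empty buffer is available.
 */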
3933 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3934                         struct sk_buff *skb, struct qeth_hdr *hdr,
3935                         unsigned int offset, unsigned int hd_len,
3936                         int elements_needed)
3937 {
3938         struct qeth_qdio_out_buffer *buffer;
3939         int start_index;
3940         int flush_count = 0;
3941         int do_pack = 0;
3942         int tmp;
3943         int rc = 0;
3944
3945         /* spin until we get the queue ... */
3946         while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
3947                               QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
3948         start_index = queue->next_buf_to_fill;
3949         buffer = queue->bufs[queue->next_buf_to_fill];
3950         /*
3951          * check if buffer is empty to make sure that we do not 'overtake'
3952          * ourselves and try to fill a buffer that is already primed
3953          */
3954         if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
3955                 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3956                 return -EBUSY;
3957         }
3958         /* check if we need to switch packing state of this queue */
3959         qeth_switch_to_packing_if_needed(queue);
3960         if (queue->do_pack) {
3961                 do_pack = 1;
3962                 /* does packet fit in current buffer? */
3963                 if ((QETH_MAX_BUFFER_ELEMENTS(card) -
3964                     buffer->next_element_to_fill) < elements_needed) {
3965                         /* ... no -> set state PRIMED */
3966                         atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3967                         flush_count++;
3968                         queue->next_buf_to_fill =
3969                                 (queue->next_buf_to_fill + 1) %
3970                                 QDIO_MAX_BUFFERS_PER_Q;
3971                         buffer = queue->bufs[queue->next_buf_to_fill];
3972                         /* we did a step forward, so check buffer state
3973                          * again */
3974                         if (atomic_read(&buffer->state) !=
3975                             QETH_QDIO_BUF_EMPTY) {
3976                                 qeth_flush_buffers(queue, start_index,
3977                                                            flush_count);
3978                                 atomic_set(&queue->state,
3979                                                 QETH_OUT_Q_UNLOCKED);
3980                                 rc = -EBUSY;
3981                                 goto out;
3982                         }
3983                 }
3984         }
3985
3986         flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset,
3987                                         hd_len);
3988         if (flush_count)
3989                 qeth_flush_buffers(queue, start_index, flush_count);
3990         else if (!atomic_read(&queue->set_pci_flags_count))
3991                 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
3992         /*
3993          * queue->state will go from LOCKED -> UNLOCKED or from
3994          * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
3995          * (switch packing state or flush buffer to get another pci flag out).
3996          * In that case we will enter this loop
3997          */
3998         while (atomic_dec_return(&queue->state)) {
3999                 start_index = queue->next_buf_to_fill;
4000                 /* check if we can go back to non-packing state */
4001                 tmp = qeth_switch_to_nonpacking_if_needed(queue);
4002                 /*
4003                  * check if we need to flush a packing buffer to get a pci
4004                  * flag out on the queue
4005                  */
4006                 if (!tmp && !atomic_read(&queue->set_pci_flags_count))
4007                         tmp = qeth_prep_flush_pack_buffer(queue);
4008                 if (tmp) {
4009                         qeth_flush_buffers(queue, start_index, tmp);
4010                         flush_count += tmp;
4011                 }
4012         }
4013 out:
4014         /* at this point the queue is UNLOCKED again */
4015         if (do_pack)
4016                 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
4017
4018         return rc;
4019 }
4020 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
4021
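/* Fill the TSO extension header: the device needs the MSS, the total payload
 * length and the length of the protocol headers it has to replicate for each
 * segment.
 */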
4022 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4023                               unsigned int payload_len, struct sk_buff *skb,
4024                               unsigned int proto_len)
4025 {
4026         struct qeth_hdr_ext_tso *ext = &hdr->ext;
4027
4028         ext->hdr_tot_len = sizeof(*ext);
4029         ext->imb_hdr_no = 1;
4030         ext->hdr_type = 1;
4031         ext->hdr_version = 1;
4032         ext->hdr_len = 28;
4033         ext->payload_len = payload_len;
4034         ext->mss = skb_shinfo(skb)->gso_size;
4035         ext->dg_hdr_len = proto_len;
4036 }
4037
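/* Common transmit path for the qeth disciplines: build the HW header (pushed
 * into the skb or allocated from the header cache), add the TSO extension if
 * needed, and hand the skb to either the IQD fast path or the packing-capable
 * send path.
 */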
4038 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4039               struct qeth_qdio_out_q *queue, int ipv, int cast_type,
4040               void (*fill_header)(struct qeth_qdio_out_q *queue,
4041                                   struct qeth_hdr *hdr, struct sk_buff *skb,
4042                                   int ipv, int cast_type,
4043                                   unsigned int data_len))
4044 {
4045         unsigned int proto_len, hw_hdr_len;
4046         unsigned int frame_len = skb->len;
4047         bool is_tso = skb_is_gso(skb);
4048         unsigned int data_offset = 0;
4049         struct qeth_hdr *hdr = NULL;
4050         unsigned int hd_len = 0;
4051         unsigned int elements;
4052         int push_len, rc;
4053         bool is_sg;
4054
4055         if (is_tso) {
4056                 hw_hdr_len = sizeof(struct qeth_hdr_tso);
4057                 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4058         } else {
4059                 hw_hdr_len = sizeof(struct qeth_hdr);
4060                 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4061         }
4062
4063         rc = skb_cow_head(skb, hw_hdr_len);
4064         if (rc)
4065                 return rc;
4066
4067         push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4068                                       &elements);
4069         if (push_len < 0)
4070                 return push_len;
4071         if (is_tso || !push_len) {
4072                 /* HW header needs its own buffer element. */
4073                 hd_len = hw_hdr_len + proto_len;
4074                 data_offset = push_len + proto_len;
4075         }
4076         memset(hdr, 0, hw_hdr_len);
4077         fill_header(queue, hdr, skb, ipv, cast_type, frame_len);
4078         if (is_tso)
4079                 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4080                                   frame_len - proto_len, skb, proto_len);
4081
4082         is_sg = skb_is_nonlinear(skb);
4083         if (IS_IQD(card)) {
4084                 rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
4085                                               hd_len);
4086         } else {
4087                 /* TODO: drop skb_orphan() once TX completion is fast enough */
4088                 skb_orphan(skb);
4089                 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4090                                          hd_len, elements);
4091         }
4092
4093         if (!rc) {
4094                 QETH_TXQ_STAT_ADD(queue, buf_elements, elements);
4095                 if (is_sg)
4096                         QETH_TXQ_STAT_INC(queue, skbs_sg);
4097                 if (is_tso) {
4098                         QETH_TXQ_STAT_INC(queue, skbs_tso);
4099                         QETH_TXQ_STAT_ADD(queue, tso_bytes, frame_len);
4100                 }
4101         } else {
4102                 if (!push_len)
4103                         kmem_cache_free(qeth_core_header_cache, hdr);
4104                 if (rc == -EBUSY)
4105                         /* roll back to ETH header */
4106                         skb_pull(skb, push_len);
4107         }
4108         return rc;
4109 }
4110 EXPORT_SYMBOL_GPL(qeth_xmit);
4111
4112 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4113                 struct qeth_reply *reply, unsigned long data)
4114 {
4115         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4116         struct qeth_ipacmd_setadpparms *setparms;
4117
4118         QETH_CARD_TEXT(card, 4, "prmadpcb");
4119
4120         setparms = &(cmd->data.setadapterparms);
4121         if (qeth_setadpparms_inspect_rc(cmd)) {
4122                 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4123                 setparms->data.mode = SET_PROMISC_MODE_OFF;
4124         }
4125         card->info.promisc_mode = setparms->data.mode;
4126         return (cmd->hdr.return_code) ? -EIO : 0;
4127 }
4128
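/*
 * Align the adapter's promiscuous mode with dev->flags: the SETADP command
 * is only issued when the requested state differs from the cached
 * info.promisc_mode, so redundant calls return early.
 */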
4129 void qeth_setadp_promisc_mode(struct qeth_card *card)
4130 {
4131         enum qeth_ipa_promisc_modes mode;
4132         struct net_device *dev = card->dev;
4133         struct qeth_cmd_buffer *iob;
4134         struct qeth_ipa_cmd *cmd;
4135
4136         QETH_CARD_TEXT(card, 4, "setprom");
4137
4138         if (((dev->flags & IFF_PROMISC) &&
4139              (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
4140             (!(dev->flags & IFF_PROMISC) &&
4141              (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
4142                 return;
4143         mode = SET_PROMISC_MODE_OFF;
4144         if (dev->flags & IFF_PROMISC)
4145                 mode = SET_PROMISC_MODE_ON;
4146         QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4147
4148         iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4149                         sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8);
4150         if (!iob)
4151                 return;
4152         cmd = __ipa_cmd(iob);
4153         cmd->data.setadapterparms.data.mode = mode;
4154         qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4155 }
4156 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4157
4158 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4159                 struct qeth_reply *reply, unsigned long data)
4160 {
4161         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4162         struct qeth_ipacmd_setadpparms *adp_cmd;
4163
4164         QETH_CARD_TEXT(card, 4, "chgmaccb");
4165         if (qeth_setadpparms_inspect_rc(cmd))
4166                 return -EIO;
4167
4168         adp_cmd = &cmd->data.setadapterparms;
4169         if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4170                 return -EADDRNOTAVAIL;
4171
4172         if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4173             !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4174                 return -EADDRNOTAVAIL;
4175
4176         ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4177         return 0;
4178 }
4179
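/*
 * Ask the adapter for its MAC address (CHANGE_ADDR_READ_MAC); the callback
 * above copies the reply into dev->dev_addr once it passes validation.
 */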
4180 int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4181 {
4182         int rc;
4183         struct qeth_cmd_buffer *iob;
4184         struct qeth_ipa_cmd *cmd;
4185
4186         QETH_CARD_TEXT(card, 4, "chgmac");
4187
4188         iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4189                                    sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4190                                    sizeof(struct qeth_change_addr));
4191         if (!iob)
4192                 return -ENOMEM;
4193         cmd = __ipa_cmd(iob);
4194         cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4195         cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4196         ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4197                         card->dev->dev_addr);
4198         rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4199                                NULL);
4200         return rc;
4201 }
4202 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4203
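/*
 * Evaluate the SET_ACCESS_CONTROL reply. For most error codes the requested
 * isolation mode is rolled back to prev_isolation when the caller asked for
 * fallback handling.
 */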
4204 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4205                 struct qeth_reply *reply, unsigned long data)
4206 {
4207         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4208         struct qeth_set_access_ctrl *access_ctrl_req;
4209         int fallback = *(int *)reply->param;
4210
4211         QETH_CARD_TEXT(card, 4, "setaccb");
4212         if (cmd->hdr.return_code)
4213                 return -EIO;
4214         qeth_setadpparms_inspect_rc(cmd);
4215
4216         access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4217         QETH_DBF_TEXT_(SETUP, 2, "setaccb");
4218         QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
4219         QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
4220                 cmd->data.setadapterparms.hdr.return_code);
4221         if (cmd->data.setadapterparms.hdr.return_code !=
4222                                                 SET_ACCESS_CTRL_RC_SUCCESS)
4223                 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4224                                  access_ctrl_req->subcmd_code, CARD_DEVID(card),
4225                                  cmd->data.setadapterparms.hdr.return_code);
4226         switch (cmd->data.setadapterparms.hdr.return_code) {
4227         case SET_ACCESS_CTRL_RC_SUCCESS:
4228                 if (card->options.isolation == ISOLATION_MODE_NONE) {
4229                         dev_info(&card->gdev->dev,
4230                             "QDIO data connection isolation is deactivated\n");
4231                 } else {
4232                         dev_info(&card->gdev->dev,
4233                             "QDIO data connection isolation is activated\n");
4234                 }
4235                 break;
4236         case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4237                 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4238                                  CARD_DEVID(card));
4239                 if (fallback)
4240                         card->options.isolation = card->options.prev_isolation;
4241                 break;
4242         case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4243                 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4244                                  CARD_DEVID(card));
4245                 if (fallback)
4246                         card->options.isolation = card->options.prev_isolation;
4247                 break;
4248         case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4249                 dev_err(&card->gdev->dev, "Adapter does not "
4250                         "support QDIO data connection isolation\n");
4251                 break;
4252         case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4253                 dev_err(&card->gdev->dev,
4254                         "Adapter is dedicated. "
4255                         "QDIO data connection isolation not supported\n");
4256                 if (fallback)
4257                         card->options.isolation = card->options.prev_isolation;
4258                 break;
4259         case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4260                 dev_err(&card->gdev->dev,
4261                         "TSO does not permit QDIO data connection isolation\n");
4262                 if (fallback)
4263                         card->options.isolation = card->options.prev_isolation;
4264                 break;
4265         case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4266                 dev_err(&card->gdev->dev, "The adjacent switch port does not "
4267                         "support reflective relay mode\n");
4268                 if (fallback)
4269                         card->options.isolation = card->options.prev_isolation;
4270                 break;
4271         case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4272                 dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4273                                         "enabled at the adjacent switch port\n");
4274                 if (fallback)
4275                         card->options.isolation = card->options.prev_isolation;
4276                 break;
4277         case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4278                 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4279                                         "at the adjacent switch failed\n");
4280                 break;
4281         default:
4282                 /* this should never happen */
4283                 if (fallback)
4284                         card->options.isolation = card->options.prev_isolation;
4285                 break;
4286         }
4287         return (cmd->hdr.return_code) ? -EIO : 0;
4288 }
4289
4290 static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4291                 enum qeth_ipa_isolation_modes isolation, int fallback)
4292 {
4293         int rc;
4294         struct qeth_cmd_buffer *iob;
4295         struct qeth_ipa_cmd *cmd;
4296         struct qeth_set_access_ctrl *access_ctrl_req;
4297
4298         QETH_CARD_TEXT(card, 4, "setacctl");
4299
4300         QETH_DBF_TEXT_(SETUP, 2, "setacctl");
4301         QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
4302
4303         iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4304                                    sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4305                                    sizeof(struct qeth_set_access_ctrl));
4306         if (!iob)
4307                 return -ENOMEM;
4308         cmd = __ipa_cmd(iob);
4309         access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4310         access_ctrl_req->subcmd_code = isolation;
4311
4312         rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4313                                &fallback);
4314         QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
4315         return rc;
4316 }
4317
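/*
 * Apply the configured isolation mode if the adapter supports
 * SET_ACCESS_CONTROL; otherwise, for a non-default isolation setting, fall
 * back to ISOLATION_MODE_NONE and return -EOPNOTSUPP.
 */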
4318 int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
4319 {
4320         int rc = 0;
4321
4322         QETH_CARD_TEXT(card, 4, "setactlo");
4323
4324         if ((card->info.type == QETH_CARD_TYPE_OSD ||
4325              card->info.type == QETH_CARD_TYPE_OSX) &&
4326              qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4327                 rc = qeth_setadpparms_set_access_ctrl(card,
4328                         card->options.isolation, fallback);
4329                 if (rc) {
4330                         QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d)) on device %x: sending failed\n",
4331                                          rc, CARD_DEVID(card));
4332                         rc = -EOPNOTSUPP;
4333                 }
4334         } else if (card->options.isolation != ISOLATION_MODE_NONE) {
4335                 card->options.isolation = ISOLATION_MODE_NONE;
4336
4337                 dev_err(&card->gdev->dev, "Adapter does not "
4338                         "support QDIO data connection isolation\n");
4339                 rc = -EOPNOTSUPP;
4340         }
4341         return rc;
4342 }
4343 EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
4344
4345 void qeth_tx_timeout(struct net_device *dev)
4346 {
4347         struct qeth_card *card;
4348
4349         card = dev->ml_priv;
4350         QETH_CARD_TEXT(card, 4, "txtimeo");
4351         QETH_CARD_STAT_INC(card, tx_errors);
4352         qeth_schedule_recovery(card);
4353 }
4354 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
4355
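/*
 * qeth devices have no real PHY; synthesize plausible MII register values
 * (link up, full duplex, 100 Mbps unless the link type indicates a faster
 * medium) for MII register reads.
 */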
4356 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4357 {
4358         struct qeth_card *card = dev->ml_priv;
4359         int rc = 0;
4360
4361         switch (regnum) {
4362         case MII_BMCR: /* Basic mode control register */
4363                 rc = BMCR_FULLDPLX;
4364                 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4365                     (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4366                     (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4367                     (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4368                         rc |= BMCR_SPEED100;
4369                 break;
4370         case MII_BMSR: /* Basic mode status register */
4371                 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4372                      BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4373                      BMSR_100BASE4;
4374                 break;
4375         case MII_PHYSID1: /* PHYS ID 1 */
4376                 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4377                      dev->dev_addr[2];
4378                 rc = (rc >> 5) & 0xFFFF;
4379                 break;
4380         case MII_PHYSID2: /* PHYS ID 2 */
4381                 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4382                 break;
4383         case MII_ADVERTISE: /* Advertisement control reg */
4384                 rc = ADVERTISE_ALL;
4385                 break;
4386         case MII_LPA: /* Link partner ability reg */
4387                 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4388                      LPA_100BASE4 | LPA_LPACK;
4389                 break;
4390         case MII_EXPANSION: /* Expansion register */
4391                 break;
4392         case MII_DCOUNTER: /* disconnect counter */
4393                 break;
4394         case MII_FCSCOUNTER: /* false carrier counter */
4395                 break;
4396         case MII_NWAYTEST: /* N-way auto-neg test register */
4397                 break;
4398         case MII_RERRCOUNTER: /* rx error counter */
4399                 rc = card->stats.rx_errors;
4400                 break;
4401         case MII_SREVISION: /* silicon revision */
4402                 break;
4403         case MII_RESV1: /* reserved 1 */
4404                 break;
4405         case MII_LBRERROR: /* loopback, rx, bypass error */
4406                 break;
4407         case MII_PHYADDR: /* physical address */
4408                 break;
4409         case MII_RESV2: /* reserved 2 */
4410                 break;
4411         case MII_TPISTATUS: /* TPI status for 10mbps */
4412                 break;
4413         case MII_NCONFIG: /* network interface config */
4414                 break;
4415         default:
4416                 break;
4417         }
4418         return rc;
4419 }
4420
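/*
 * Copy one fragment of the SNMP reply into the user buffer tracked by
 * qinfo; returning 1 keeps the command pending until seq_no catches up
 * with used_total.
 */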
4421 static int qeth_snmp_command_cb(struct qeth_card *card,
4422                 struct qeth_reply *reply, unsigned long sdata)
4423 {
4424         struct qeth_ipa_cmd *cmd;
4425         struct qeth_arp_query_info *qinfo;
4426         unsigned char *data;
4427         void *snmp_data;
4428         __u16 data_len;
4429
4430         QETH_CARD_TEXT(card, 3, "snpcmdcb");
4431
4432         cmd = (struct qeth_ipa_cmd *) sdata;
4433         data = (unsigned char *)((char *)cmd - reply->offset);
4434         qinfo = (struct qeth_arp_query_info *) reply->param;
4435
4436         if (cmd->hdr.return_code) {
4437                 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4438                 return -EIO;
4439         }
4440         if (cmd->data.setadapterparms.hdr.return_code) {
4441                 cmd->hdr.return_code =
4442                         cmd->data.setadapterparms.hdr.return_code;
4443                 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4444                 return -EIO;
4445         }
4446         data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
4447         if (cmd->data.setadapterparms.hdr.seq_no == 1) {
4448                 snmp_data = &cmd->data.setadapterparms.data.snmp;
4449                 data_len -= offsetof(struct qeth_ipa_cmd,
4450                                      data.setadapterparms.data.snmp);
4451         } else {
4452                 snmp_data = &cmd->data.setadapterparms.data.snmp.request;
4453                 data_len -= offsetof(struct qeth_ipa_cmd,
4454                                      data.setadapterparms.data.snmp.request);
4455         }
4456
4457         /* check if there is enough room in userspace */
4458         if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4459                 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
4460                 return -ENOSPC;
4461         }
4462         QETH_CARD_TEXT_(card, 4, "snore%i",
4463                        cmd->data.setadapterparms.hdr.used_total);
4464         QETH_CARD_TEXT_(card, 4, "sseqn%i",
4465                 cmd->data.setadapterparms.hdr.seq_no);
4466         /* copy entries to user buffer */
4467         memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4468         qinfo->udata_offset += data_len;
4469
4470         /* check if all replies received ... */
4471         QETH_CARD_TEXT_(card, 4, "srtot%i",
4472                        cmd->data.setadapterparms.hdr.used_total);
4473         QETH_CARD_TEXT_(card, 4, "srseq%i",
4474                        cmd->data.setadapterparms.hdr.seq_no);
4475         if (cmd->data.setadapterparms.hdr.seq_no <
4476             cmd->data.setadapterparms.hdr.used_total)
4477                 return 1;
4478         return 0;
4479 }
4480
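/*
 * Forward an SNMP request from user space to the adapter and copy the
 * assembled reply back into the caller's buffer.
 */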
4481 static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4482 {
4483         struct qeth_cmd_buffer *iob;
4484         struct qeth_ipa_cmd *cmd;
4485         struct qeth_snmp_ureq *ureq;
4486         unsigned int req_len;
4487         struct qeth_arp_query_info qinfo = {0, };
4488         int rc = 0;
4489
4490         QETH_CARD_TEXT(card, 3, "snmpcmd");
4491
4492         if (card->info.guestlan)
4493                 return -EOPNOTSUPP;
4494
4495         if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4496             IS_LAYER3(card))
4497                 return -EOPNOTSUPP;
4498
4499         /* skip 4 bytes (data_len struct member) to get req_len */
4500         if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
4501                 return -EFAULT;
4502         if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
4503                        sizeof(struct qeth_ipacmd_hdr) -
4504                        sizeof(struct qeth_ipacmd_setadpparms_hdr)))
4505                 return -EINVAL;
4506         ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
4507         if (IS_ERR(ureq)) {
4508                 QETH_CARD_TEXT(card, 2, "snmpnome");
4509                 return PTR_ERR(ureq);
4510         }
4511         qinfo.udata_len = ureq->hdr.data_len;
4512         qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4513         if (!qinfo.udata) {
4514                 kfree(ureq);
4515                 return -ENOMEM;
4516         }
4517         qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4518
4519         iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
4520                                    QETH_SNMP_SETADP_CMDLENGTH + req_len);
4521         if (!iob) {
4522                 rc = -ENOMEM;
4523                 goto out;
4524         }
4525
4526         /* for large requests, fix-up the length fields: */
4527         qeth_prepare_ipa_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len);
4528
4529         cmd = __ipa_cmd(iob);
4530         memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
4531         rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
4532         if (rc) {
4533                 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4534                                  CARD_DEVID(card), rc);
4535         } else {
4536                 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4537                         rc = -EFAULT;
4538         }
4539 out:
4540         kfree(ureq);
4541         kfree(qinfo.udata);
4542         return rc;
4543 }
4544
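/*
 * Append one QUERY_OAT reply fragment to the response buffer; return 1
 * while further fragments are outstanding.
 */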
4545 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4546                 struct qeth_reply *reply, unsigned long data)
4547 {
4548         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4549         struct qeth_qoat_priv *priv;
4550         char *resdata;
4551         int resdatalen;
4552
4553         QETH_CARD_TEXT(card, 3, "qoatcb");
4554         if (qeth_setadpparms_inspect_rc(cmd))
4555                 return -EIO;
4556
4557         priv = (struct qeth_qoat_priv *)reply->param;
4558         resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4559         resdata = (char *)data + 28;
4560
4561         if (resdatalen > (priv->buffer_len - priv->response_len))
4562                 return -ENOSPC;
4563
4564         memcpy((priv->buffer + priv->response_len), resdata,
4565                 resdatalen);
4566         priv->response_len += resdatalen;
4567
4568         if (cmd->data.setadapterparms.hdr.seq_no <
4569             cmd->data.setadapterparms.hdr.used_total)
4570                 return 1;
4571         return 0;
4572 }
4573
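/*
 * Run an IPA_SETADP_QUERY_OAT request on behalf of user space: collect the
 * reply fragments in a temporary buffer, then copy the data and the
 * resulting length back into the caller's qeth_query_oat_data.
 */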
4574 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4575 {
4576         int rc = 0;
4577         struct qeth_cmd_buffer *iob;
4578         struct qeth_ipa_cmd *cmd;
4579         struct qeth_query_oat *oat_req;
4580         struct qeth_query_oat_data oat_data;
4581         struct qeth_qoat_priv priv;
4582         void __user *tmp;
4583
4584         QETH_CARD_TEXT(card, 3, "qoatcmd");
4585
4586         if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
4587                 rc = -EOPNOTSUPP;
4588                 goto out;
4589         }
4590
4591         if (copy_from_user(&oat_data, udata,
4592                            sizeof(struct qeth_query_oat_data))) {
4593                 rc = -EFAULT;
4594                 goto out;
4595         }
4596
4597         priv.buffer_len = oat_data.buffer_len;
4598         priv.response_len = 0;
4599         priv.buffer = vzalloc(oat_data.buffer_len);
4600         if (!priv.buffer) {
4601                 rc = -ENOMEM;
4602                 goto out;
4603         }
4604
4605         iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4606                                    sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4607                                    sizeof(struct qeth_query_oat));
4608         if (!iob) {
4609                 rc = -ENOMEM;
4610                 goto out_free;
4611         }
4612         cmd = __ipa_cmd(iob);
4613         oat_req = &cmd->data.setadapterparms.data.query_oat;
4614         oat_req->subcmd_code = oat_data.command;
4615
4616         rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
4617                                &priv);
4618         if (!rc) {
4619                 if (is_compat_task())
4620                         tmp = compat_ptr(oat_data.ptr);
4621                 else
4622                         tmp = (void __user *)(unsigned long)oat_data.ptr;
4623
4624                 if (copy_to_user(tmp, priv.buffer,
4625                     priv.response_len)) {
4626                         rc = -EFAULT;
4627                         goto out_free;
4628                 }
4629
4630                 oat_data.response_len = priv.response_len;
4631
4632                 if (copy_to_user(udata, &oat_data,
4633                     sizeof(struct qeth_query_oat_data)))
4634                         rc = -EFAULT;
4635         }
4636
4637 out_free:
4638         vfree(priv.buffer);
4639 out:
4640         return rc;
4641 }
4642
4643 static int qeth_query_card_info_cb(struct qeth_card *card,
4644                                    struct qeth_reply *reply, unsigned long data)
4645 {
4646         struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
4647         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4648         struct qeth_query_card_info *card_info;
4649
4650         QETH_CARD_TEXT(card, 2, "qcrdincb");
4651         if (qeth_setadpparms_inspect_rc(cmd))
4652                 return -EIO;
4653
4654         card_info = &cmd->data.setadapterparms.data.card_info;
4655         carrier_info->card_type = card_info->card_type;
4656         carrier_info->port_mode = card_info->port_mode;
4657         carrier_info->port_speed = card_info->port_speed;
4658         return 0;
4659 }
4660
4661 int qeth_query_card_info(struct qeth_card *card,
4662                          struct carrier_info *carrier_info)
4663 {
4664         struct qeth_cmd_buffer *iob;
4665
4666         QETH_CARD_TEXT(card, 2, "qcrdinfo");
4667         if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4668                 return -EOPNOTSUPP;
4669         iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
4670                 sizeof(struct qeth_ipacmd_setadpparms_hdr));
4671         if (!iob)
4672                 return -ENOMEM;
4673         return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
4674                                         (void *)carrier_info);
4675 }
4676
4677 /**
4678  * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
4679  * @card: pointer to a qeth_card
4680  *
4681  * Return:
4682  *      0, if a MAC address has been set for the card's netdevice
4683  *      a return code, for various error conditions
4684  */
4685 int qeth_vm_request_mac(struct qeth_card *card)
4686 {
4687         struct diag26c_mac_resp *response;
4688         struct diag26c_mac_req *request;
4689         struct ccw_dev_id id;
4690         int rc;
4691
4692         QETH_DBF_TEXT(SETUP, 2, "vmreqmac");
4693
4694         request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
4695         response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
4696         if (!request || !response) {
4697                 rc = -ENOMEM;
4698                 goto out;
4699         }
4700
4701         ccw_device_get_id(CARD_DDEV(card), &id);
4702         request->resp_buf_len = sizeof(*response);
4703         request->resp_version = DIAG26C_VERSION2;
4704         request->op_code = DIAG26C_GET_MAC;
4705         request->devno = id.devno;
4706
4707         QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4708         rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
4709         QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4710         if (rc)
4711                 goto out;
4712         QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
4713
4714         if (request->resp_buf_len < sizeof(*response) ||
4715             response->version != request->resp_version) {
4716                 rc = -EIO;
4717                 QETH_DBF_TEXT(SETUP, 2, "badresp");
4718                 QETH_DBF_HEX(SETUP, 2, &request->resp_buf_len,
4719                              sizeof(request->resp_buf_len));
4720         } else if (!is_valid_ether_addr(response->mac)) {
4721                 rc = -EINVAL;
4722                 QETH_DBF_TEXT(SETUP, 2, "badmac");
4723                 QETH_DBF_HEX(SETUP, 2, response->mac, ETH_ALEN);
4724         } else {
4725                 ether_addr_copy(card->dev->dev_addr, response->mac);
4726         }
4727
4728 out:
4729         kfree(response);
4730         kfree(request);
4731         return rc;
4732 }
4733 EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
4734
4735 static int qeth_get_qdio_q_format(struct qeth_card *card)
4736 {
4737         if (card->info.type == QETH_CARD_TYPE_IQD)
4738                 return QDIO_IQDIO_QFMT;
4739         else
4740                 return QDIO_QETH_QFMT;
4741 }
4742
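/*
 * Read the device's configuration data and QDIO SSQD descriptor (bringing
 * the data channel online temporarily if necessary) to determine the queue
 * format and whether completion queueing is available.
 */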
4743 static void qeth_determine_capabilities(struct qeth_card *card)
4744 {
4745         int rc;
4746         int length;
4747         char *prcd;
4748         struct ccw_device *ddev;
4749         int ddev_offline = 0;
4750
4751         QETH_DBF_TEXT(SETUP, 2, "detcapab");
4752         ddev = CARD_DDEV(card);
4753         if (!ddev->online) {
4754                 ddev_offline = 1;
4755                 rc = ccw_device_set_online(ddev);
4756                 if (rc) {
4757                         QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4758                         goto out;
4759                 }
4760         }
4761
4762         rc = qeth_read_conf_data(card, (void **) &prcd, &length);
4763         if (rc) {
4764                 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
4765                                  CARD_DEVID(card), rc);
4766                 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
4767                 goto out_offline;
4768         }
4769         qeth_configure_unitaddr(card, prcd);
4770         if (ddev_offline)
4771                 qeth_configure_blkt_default(card, prcd);
4772         kfree(prcd);
4773
4774         rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
4775         if (rc)
4776                 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
4777
4778         QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt);
4779         QETH_DBF_TEXT_(SETUP, 2, "ac1:%02x", card->ssqd.qdioac1);
4780         QETH_DBF_TEXT_(SETUP, 2, "ac2:%04x", card->ssqd.qdioac2);
4781         QETH_DBF_TEXT_(SETUP, 2, "ac3:%04x", card->ssqd.qdioac3);
4782         QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt);
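        /* CQ support requires the IQDIO queue format plus the
         * INITIATE_INPUTQ and FORMAT2 CQ facility bits:
         */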
4783         if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
4784             ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
4785             ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
4786                 dev_info(&card->gdev->dev,
4787                         "Completion Queueing supported\n");
4788         } else {
4789                 card->options.cq = QETH_CQ_NOTAVAILABLE;
4790         }
4791
4793 out_offline:
4794         if (ddev_offline == 1)
4795                 ccw_device_set_offline(ddev);
4796 out:
4797         return;
4798 }
4799
4800 static void qeth_qdio_establish_cq(struct qeth_card *card,
4801                                    struct qdio_buffer **in_sbal_ptrs,
4802                                    void (**queue_start_poll)
4803                                         (struct ccw_device *, int,
4804                                          unsigned long))
4805 {
4806         int i;
4807
4808         if (card->options.cq == QETH_CQ_ENABLED) {
4809                 int offset = QDIO_MAX_BUFFERS_PER_Q *
4810                              (card->qdio.no_in_queues - 1);
4811                 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4812                         in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
4813                                 virt_to_phys(card->qdio.c_q->bufs[i].buffer);
4814                 }
4815
4816                 queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
4817         }
4818 }
4819
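/*
 * Build the qdio_initialize descriptor (QIB parameters, SBAL address arrays,
 * handlers) and allocate/establish the QDIO queues for this card.
 */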
4820 static int qeth_qdio_establish(struct qeth_card *card)
4821 {
4822         struct qdio_initialize init_data;
4823         char *qib_param_field;
4824         struct qdio_buffer **in_sbal_ptrs;
4825         void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
4826         struct qdio_buffer **out_sbal_ptrs;
4827         int i, j, k;
4828         int rc = 0;
4829
4830         QETH_DBF_TEXT(SETUP, 2, "qdioest");
4831
4832         qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q,
4833                                   GFP_KERNEL);
4834         if (!qib_param_field) {
4835                 rc =  -ENOMEM;
4836                 goto out_free_nothing;
4837         }
4838
4839         qeth_create_qib_param_field(card, qib_param_field);
4840         qeth_create_qib_param_field_blkt(card, qib_param_field);
4841
4842         in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
4843                                sizeof(void *),
4844                                GFP_KERNEL);
4845         if (!in_sbal_ptrs) {
4846                 rc = -ENOMEM;
4847                 goto out_free_qib_param;
4848         }
4849         for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4850                 in_sbal_ptrs[i] = (struct qdio_buffer *)
4851                         virt_to_phys(card->qdio.in_q->bufs[i].buffer);
4852         }
4853
4854         queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
4855                                    GFP_KERNEL);
4856         if (!queue_start_poll) {
4857                 rc = -ENOMEM;
4858                 goto out_free_in_sbals;
4859         }
4860         for (i = 0; i < card->qdio.no_in_queues; ++i)
4861                 queue_start_poll[i] = qeth_qdio_start_poll;
4862
4863         qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
4864
4865         out_sbal_ptrs =
4866                 kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
4867                         sizeof(void *),
4868                         GFP_KERNEL);
4869         if (!out_sbal_ptrs) {
4870                 rc = -ENOMEM;
4871                 goto out_free_queue_start_poll;
4872         }
4873         for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
4874                 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
4875                         out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
4876                                 card->qdio.out_qs[i]->bufs[j]->buffer);
4877                 }
4878
4879         memset(&init_data, 0, sizeof(struct qdio_initialize));
4880         init_data.cdev                   = CARD_DDEV(card);
4881         init_data.q_format               = qeth_get_qdio_q_format(card);
4882         init_data.qib_param_field_format = 0;
4883         init_data.qib_param_field        = qib_param_field;
4884         init_data.no_input_qs            = card->qdio.no_in_queues;
4885         init_data.no_output_qs           = card->qdio.no_out_queues;
4886         init_data.input_handler          = qeth_qdio_input_handler;
4887         init_data.output_handler         = qeth_qdio_output_handler;
4888         init_data.queue_start_poll_array = queue_start_poll;
4889         init_data.int_parm               = (unsigned long) card;
4890         init_data.input_sbal_addr_array  = in_sbal_ptrs;
4891         init_data.output_sbal_addr_array = out_sbal_ptrs;
4892         init_data.output_sbal_state_array = card->qdio.out_bufstates;
4893         init_data.scan_threshold =
4894                 (card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32;
4895
4896         if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
4897                 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
4898                 rc = qdio_allocate(&init_data);
4899                 if (rc) {
4900                         atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
4901                         goto out;
4902                 }
4903                 rc = qdio_establish(&init_data);
4904                 if (rc) {
4905                         atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
4906                         qdio_free(CARD_DDEV(card));
4907                 }
4908         }
4909
4910         switch (card->options.cq) {
4911         case QETH_CQ_ENABLED:
4912                 dev_info(&card->gdev->dev, "Completion Queue support enabled\n");
4913                 break;
4914         case QETH_CQ_DISABLED:
4915                 dev_info(&card->gdev->dev, "Completion Queue support disabled\n");
4916                 break;
4917         default:
4918                 break;
4919         }
4920 out:
4921         kfree(out_sbal_ptrs);
4922 out_free_queue_start_poll:
4923         kfree(queue_start_poll);
4924 out_free_in_sbals:
4925         kfree(in_sbal_ptrs);
4926 out_free_qib_param:
4927         kfree(qib_param_field);
4928 out_free_nothing:
4929         return rc;
4930 }
4931
4932 static void qeth_core_free_card(struct qeth_card *card)
4933 {
4934         QETH_DBF_TEXT(SETUP, 2, "freecrd");
4935         QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
4936         qeth_clean_channel(&card->read);
4937         qeth_clean_channel(&card->write);
4938         qeth_clean_channel(&card->data);
4939         destroy_workqueue(card->event_wq);
4940         qeth_free_qdio_buffers(card);
4941         unregister_service_level(&card->qeth_service_level);
4942         dev_set_drvdata(&card->gdev->dev, NULL);
4943         kfree(card);
4944 }
4945
4946 void qeth_trace_features(struct qeth_card *card)
4947 {
4948         QETH_CARD_TEXT(card, 2, "features");
4949         QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
4950         QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
4951         QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
4952         QETH_CARD_HEX(card, 2, &card->info.diagass_support,
4953                       sizeof(card->info.diagass_support));
4954 }
4955 EXPORT_SYMBOL_GPL(qeth_trace_features);
4956
4957 static struct ccw_device_id qeth_ids[] = {
4958         {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
4959                                         .driver_info = QETH_CARD_TYPE_OSD},
4960         {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
4961                                         .driver_info = QETH_CARD_TYPE_IQD},
4962         {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
4963                                         .driver_info = QETH_CARD_TYPE_OSN},
4964         {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
4965                                         .driver_info = QETH_CARD_TYPE_OSM},
4966         {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
4967                                         .driver_info = QETH_CARD_TYPE_OSX},
4968         {},
4969 };
4970 MODULE_DEVICE_TABLE(ccw, qeth_ids);
4971
4972 static struct ccw_driver qeth_ccw_driver = {
4973         .driver = {
4974                 .owner = THIS_MODULE,
4975                 .name = "qeth",
4976         },
4977         .ids = qeth_ids,
4978         .probe = ccwgroup_probe_ccwdev,
4979         .remove = ccwgroup_remove_ccwdev,
4980 };
4981
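/*
 * Bring the read/write/data channels online, activate the IDX channels and
 * set up the MPC connection. Recoverable failures are retried up to three
 * times before the setup is abandoned.
 */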
4982 int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
4983 {
4984         int retries = 3;
4985         int rc;
4986
4987         QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
4988         atomic_set(&card->force_alloc_skb, 0);
4989         qeth_update_from_chp_desc(card);
4990 retry:
4991         if (retries < 3)
4992                 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
4993                                  CARD_DEVID(card));
4994         rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
4995         ccw_device_set_offline(CARD_DDEV(card));
4996         ccw_device_set_offline(CARD_WDEV(card));
4997         ccw_device_set_offline(CARD_RDEV(card));
4998         qdio_free(CARD_DDEV(card));
4999         rc = ccw_device_set_online(CARD_RDEV(card));
5000         if (rc)
5001                 goto retriable;
5002         rc = ccw_device_set_online(CARD_WDEV(card));
5003         if (rc)
5004                 goto retriable;
5005         rc = ccw_device_set_online(CARD_DDEV(card));
5006         if (rc)
5007                 goto retriable;
5008 retriable:
5009         if (rc == -ERESTARTSYS) {
5010                 QETH_DBF_TEXT(SETUP, 2, "break1");
5011                 return rc;
5012         } else if (rc) {
5013                 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
5014                 if (--retries < 0)
5015                         goto out;
5016                 else
5017                         goto retry;
5018         }
5019         qeth_determine_capabilities(card);
5020         qeth_init_tokens(card);
5021         qeth_init_func_level(card);
5022         rc = qeth_idx_activate_channel(card, &card->read, qeth_idx_read_cb);
5023         if (rc == -ERESTARTSYS) {
5024                 QETH_DBF_TEXT(SETUP, 2, "break2");
5025                 return rc;
5026         } else if (rc) {
5027                 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
5028                 if (--retries < 0)
5029                         goto out;
5030                 else
5031                         goto retry;
5032         }
5033         rc = qeth_idx_activate_channel(card, &card->write, qeth_idx_write_cb);
5034         if (rc == -ERESTARTSYS) {
5035                 QETH_DBF_TEXT(SETUP, 2, "break3");
5036                 return rc;
5037         } else if (rc) {
5038                 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
5039                 if (--retries < 0)
5040                         goto out;
5041                 else
5042                         goto retry;
5043         }
5044         card->read_or_write_problem = 0;
5045         rc = qeth_mpc_initialize(card);
5046         if (rc) {
5047                 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
5048                 goto out;
5049         }
5050
5051         rc = qeth_send_startlan(card);
5052         if (rc) {
5053                 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
5054                 if (rc == -ENETDOWN) {
5055                         dev_warn(&card->gdev->dev, "The LAN is offline\n");
5056                         *carrier_ok = false;
5057                 } else {
5058                         goto out;
5059                 }
5060         } else {
5061                 *carrier_ok = true;
5062         }
5063
5064         card->options.ipa4.supported_funcs = 0;
5065         card->options.ipa6.supported_funcs = 0;
5066         card->options.adp.supported_funcs = 0;
5067         card->options.sbp.supported_funcs = 0;
5068         card->info.diagass_support = 0;
5069         rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5070         if (rc == -ENOMEM)
5071                 goto out;
5072         if (qeth_is_supported(card, IPA_IPV6)) {
5073                 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5074                 if (rc == -ENOMEM)
5075                         goto out;
5076         }
5077         if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5078                 rc = qeth_query_setadapterparms(card);
5079                 if (rc < 0) {
5080                         QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
5081                         goto out;
5082                 }
5083         }
5084         if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5085                 rc = qeth_query_setdiagass(card);
5086                 if (rc < 0) {
5087                         QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
5088                         goto out;
5089                 }
5090         }
5091         return 0;
5092 out:
5093         dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5094                 "an error on the device\n");
5095         QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5096                          CARD_DEVID(card), rc);
5097         return rc;
5098 }
5099 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
5100
5101 static void qeth_create_skb_frag(struct qdio_buffer_element *element,
5102                                  struct sk_buff *skb, int offset, int data_len)
5103 {
5104         struct page *page = virt_to_page(element->addr);
5105         unsigned int next_frag;
5106
5107         /* first fill the linear space */
5108         if (!skb->len) {
5109                 unsigned int linear = min(data_len, skb_tailroom(skb));
5110
5111                 skb_put_data(skb, element->addr + offset, linear);
5112                 data_len -= linear;
5113                 if (!data_len)
5114                         return;
5115                 offset += linear;
5116                 /* fall through to add page frag for remaining data */
5117         }
5118
5119         next_frag = skb_shinfo(skb)->nr_frags;
5120         get_page(page);
5121         skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len);
5122 }
5123
5124 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
5125 {
5126         return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
5127 }
5128
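/*
 * Extract the next packet from an inbound QDIO buffer: locate the qeth_hdr,
 * then either copy the payload into a freshly allocated skb or, for large
 * frames, attach the buffer pages as rx_sg fragments.
 */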
5129 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
5130                 struct qeth_qdio_buffer *qethbuffer,
5131                 struct qdio_buffer_element **__element, int *__offset,
5132                 struct qeth_hdr **hdr)
5133 {
5134         struct qdio_buffer_element *element = *__element;
5135         struct qdio_buffer *buffer = qethbuffer->buffer;
5136         int offset = *__offset;
5137         struct sk_buff *skb;
5138         int skb_len = 0;
5139         void *data_ptr;
5140         int data_len;
5141         int headroom = 0;
5142         int use_rx_sg = 0;
5143
5144         /* qeth_hdr must not cross element boundaries */
5145         while (element->length < offset + sizeof(struct qeth_hdr)) {
5146                 if (qeth_is_last_sbale(element))
5147                         return NULL;
5148                 element++;
5149                 offset = 0;
5150         }
5151         *hdr = element->addr + offset;
5152
5153         offset += sizeof(struct qeth_hdr);
5154         switch ((*hdr)->hdr.l2.id) {
5155         case QETH_HEADER_TYPE_LAYER2:
5156                 skb_len = (*hdr)->hdr.l2.pkt_length;
5157                 break;
5158         case QETH_HEADER_TYPE_LAYER3:
5159                 skb_len = (*hdr)->hdr.l3.length;
5160                 headroom = ETH_HLEN;
5161                 break;
5162         case QETH_HEADER_TYPE_OSN:
5163                 skb_len = (*hdr)->hdr.osn.pdu_length;
5164                 headroom = sizeof(struct qeth_hdr);
5165                 break;
5166         default:
5167                 break;
5168         }
5169
5170         if (!skb_len)
5171                 return NULL;
5172
5173         if (((skb_len >= card->options.rx_sg_cb) &&
5174              (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
5175              (!atomic_read(&card->force_alloc_skb))) ||
5176             (card->options.cq == QETH_CQ_ENABLED))
5177                 use_rx_sg = 1;
5178
5179         if (use_rx_sg && qethbuffer->rx_skb) {
5180                 /* QETH_CQ_ENABLED only: */
5181                 skb = qethbuffer->rx_skb;
5182                 qethbuffer->rx_skb = NULL;
5183         } else {
5184                 unsigned int linear = (use_rx_sg) ? QETH_RX_PULL_LEN : skb_len;
5185
5186                 skb = napi_alloc_skb(&card->napi, linear + headroom);
5187         }
5188         if (!skb)
5189                 goto no_mem;
5190         if (headroom)
5191                 skb_reserve(skb, headroom);
5192
5193         data_ptr = element->addr + offset;
5194         while (skb_len) {
5195                 data_len = min(skb_len, (int)(element->length - offset));
5196                 if (data_len) {
5197                         if (use_rx_sg)
5198                                 qeth_create_skb_frag(element, skb, offset,
5199                                                      data_len);
5200                         else
5201                                 skb_put_data(skb, data_ptr, data_len);
5202                 }
5203                 skb_len -= data_len;
5204                 if (skb_len) {
5205                         if (qeth_is_last_sbale(element)) {
5206                                 QETH_CARD_TEXT(card, 4, "unexeob");
5207                                 QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5208                                 dev_kfree_skb_any(skb);
5209                                 QETH_CARD_STAT_INC(card, rx_errors);
5210                                 return NULL;
5211                         }
5212                         element++;
5213                         offset = 0;
5214                         data_ptr = element->addr;
5215                 } else {
5216                         offset += data_len;
5217                 }
5218         }
5219         *__element = element;
5220         *__offset = offset;
5221         if (use_rx_sg) {
5222                 QETH_CARD_STAT_INC(card, rx_sg_skbs);
5223                 QETH_CARD_STAT_ADD(card, rx_sg_frags,
5224                                    skb_shinfo(skb)->nr_frags);
5225         }
5226         return skb;
5227 no_mem:
5228         if (net_ratelimit()) {
5229                 QETH_CARD_TEXT(card, 2, "noskbmem");
5230         }
5231         QETH_CARD_STAT_INC(card, rx_dropped);
5232         return NULL;
5233 }
5234 EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
5235
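/*
 * NAPI poll handler: fetch completed input buffers from QDIO, pass them to
 * the discipline's RX handler and re-queue them until either the budget is
 * exhausted or no more buffers are pending.
 */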
5236 int qeth_poll(struct napi_struct *napi, int budget)
5237 {
5238         struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5239         int work_done = 0;
5240         struct qeth_qdio_buffer *buffer;
5241         int done;
5242         int new_budget = budget;
5243
5244         while (1) {
5245                 if (!card->rx.b_count) {
5246                         card->rx.qdio_err = 0;
5247                         card->rx.b_count = qdio_get_next_buffers(
5248                                 card->data.ccwdev, 0, &card->rx.b_index,
5249                                 &card->rx.qdio_err);
5250                         if (card->rx.b_count <= 0) {
5251                                 card->rx.b_count = 0;
5252                                 break;
5253                         }
5254                         card->rx.b_element =
5255                                 &card->qdio.in_q->bufs[card->rx.b_index]
5256                                 .buffer->element[0];
5257                         card->rx.e_offset = 0;
5258                 }
5259
5260                 while (card->rx.b_count) {
5261                         buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5262                         if (!(card->rx.qdio_err &&
5263                             qeth_check_qdio_errors(card, buffer->buffer,
5264                             card->rx.qdio_err, "qinerr")))
5265                                 work_done +=
5266                                         card->discipline->process_rx_buffer(
5267                                                 card, new_budget, &done);
5268                         else
5269                                 done = 1;
5270
5271                         if (done) {
5272                                 QETH_CARD_STAT_INC(card, rx_bufs);
5273                                 qeth_put_buffer_pool_entry(card,
5274                                         buffer->pool_entry);
5275                                 qeth_queue_input_buffer(card, card->rx.b_index);
5276                                 card->rx.b_count--;
5277                                 if (card->rx.b_count) {
5278                                         card->rx.b_index =
5279                                                 (card->rx.b_index + 1) %
5280                                                 QDIO_MAX_BUFFERS_PER_Q;
5281                                         card->rx.b_element =
5282                                                 &card->qdio.in_q
5283                                                 ->bufs[card->rx.b_index]
5284                                                 .buffer->element[0];
5285                                         card->rx.e_offset = 0;
5286                                 }
5287                         }
5288
5289                         if (work_done >= budget)
5290                                 goto out;
5291                         else
5292                                 new_budget = budget - work_done;
5293                 }
5294         }
5295
5296         napi_complete_done(napi, work_done);
5297         if (qdio_start_irq(card->data.ccwdev, 0))
5298                 napi_schedule(&card->napi);
5299 out:
5300         return work_done;
5301 }
5302 EXPORT_SYMBOL_GPL(qeth_poll);
5303
5304 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
5305 {
5306         if (!cmd->hdr.return_code)
5307                 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5308         return cmd->hdr.return_code;
5309 }
5310
5311 static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
5312                                         struct qeth_reply *reply,
5313                                         unsigned long data)
5314 {
5315         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5316         struct qeth_ipa_caps *caps = reply->param;
5317
5318         if (qeth_setassparms_inspect_rc(cmd))
5319                 return -EIO;
5320
5321         caps->supported = cmd->data.setassparms.data.caps.supported;
5322         caps->enabled = cmd->data.setassparms.data.caps.enabled;
5323         return 0;
5324 }
5325
5326 int qeth_setassparms_cb(struct qeth_card *card,
5327                         struct qeth_reply *reply, unsigned long data)
5328 {
5329         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5330
5331         QETH_CARD_TEXT(card, 4, "defadpcb");
5332
5333         if (cmd->hdr.return_code)
5334                 return -EIO;
5335
5336         cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5337         if (cmd->hdr.prot_version == QETH_PROT_IPV4)
5338                 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
5339         if (cmd->hdr.prot_version == QETH_PROT_IPV6)
5340                 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
5341         return 0;
5342 }
5343 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
5344
5345 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
5346                                                  enum qeth_ipa_funcs ipa_func,
5347                                                  __u16 cmd_code, __u16 len,
5348                                                  enum qeth_prot_versions prot)
5349 {
5350         struct qeth_cmd_buffer *iob;
5351         struct qeth_ipa_cmd *cmd;
5352
5353         QETH_CARD_TEXT(card, 4, "getasscm");
5354         iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
5355
5356         if (iob) {
5357                 cmd = __ipa_cmd(iob);
5358                 cmd->data.setassparms.hdr.assist_no = ipa_func;
5359                 cmd->data.setassparms.hdr.length = 8 + len;
5360                 cmd->data.setassparms.hdr.command_code = cmd_code;
5361         }
5362
5363         return iob;
5364 }
5365 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
5366
5367 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
5368                                       enum qeth_ipa_funcs ipa_func,
5369                                       u16 cmd_code, long data,
5370                                       enum qeth_prot_versions prot)
5371 {
5372         int length = 0;
5373         struct qeth_cmd_buffer *iob;
5374
5375         QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
5376         if (data)
5377                 length = sizeof(__u32);
5378         iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
5379         if (!iob)
5380                 return -ENOMEM;
5381
5382         __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data;
5383         return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
5384 }
5385 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
5386
5387 static void qeth_unregister_dbf_views(void)
5388 {
5389         int x;
5390         for (x = 0; x < QETH_DBF_INFOS; x++) {
5391                 debug_unregister(qeth_dbf[x].id);
5392                 qeth_dbf[x].id = NULL;
5393         }
5394 }
5395
5396 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
5397 {
5398         char dbf_txt_buf[32];
5399         va_list args;
5400
5401         if (!debug_level_enabled(id, level))
5402                 return;
5403         va_start(args, fmt);
5404         vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
5405         va_end(args);
5406         debug_text_event(id, level, dbf_txt_buf);
5407 }
5408 EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
5409
5410 static int qeth_register_dbf_views(void)
5411 {
5412         int ret;
5413         int x;
5414
5415         for (x = 0; x < QETH_DBF_INFOS; x++) {
5416                 /* register the areas */
5417                 qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
5418                                                 qeth_dbf[x].pages,
5419                                                 qeth_dbf[x].areas,
5420                                                 qeth_dbf[x].len);
5421                 if (qeth_dbf[x].id == NULL) {
5422                         qeth_unregister_dbf_views();
5423                         return -ENOMEM;
5424                 }
5425
5426                 /* register a view */
5427                 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
5428                 if (ret) {
5429                         qeth_unregister_dbf_views();
5430                         return ret;
5431                 }
5432
5433                 /* set a passing level */
5434                 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
5435         }
5436
5437         return 0;
5438 }
5439
5440 static DEFINE_MUTEX(qeth_mod_mutex);    /* for synchronized module loading */
5441
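/*
 * Bind the card to its layer 2 or layer 3 discipline, loading the
 * corresponding qeth_l2/qeth_l3 module on demand.
 */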
5442 int qeth_core_load_discipline(struct qeth_card *card,
5443                 enum qeth_discipline_id discipline)
5444 {
5445         mutex_lock(&qeth_mod_mutex);
5446         switch (discipline) {
5447         case QETH_DISCIPLINE_LAYER3:
5448                 card->discipline = try_then_request_module(
5449                         symbol_get(qeth_l3_discipline), "qeth_l3");
5450                 break;
5451         case QETH_DISCIPLINE_LAYER2:
5452                 card->discipline = try_then_request_module(
5453                         symbol_get(qeth_l2_discipline), "qeth_l2");
5454                 break;
5455         default:
5456                 break;
5457         }
5458         mutex_unlock(&qeth_mod_mutex);
5459
5460         if (!card->discipline) {
5461                 dev_err(&card->gdev->dev, "There is no kernel module to "
5462                         "support discipline %d\n", discipline);
5463                 return -EINVAL;
5464         }
5465
5466         card->options.layer = discipline;
5467         return 0;
5468 }
5469
5470 void qeth_core_free_discipline(struct qeth_card *card)
5471 {
5472         if (IS_LAYER2(card))
5473                 symbol_put(qeth_l2_discipline);
5474         else
5475                 symbol_put(qeth_l3_discipline);
5476         card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
5477         card->discipline = NULL;
5478 }
5479
5480 const struct device_type qeth_generic_devtype = {
5481         .name = "qeth_generic",
5482         .groups = qeth_generic_attr_groups,
5483 };
5484 EXPORT_SYMBOL_GPL(qeth_generic_devtype);
5485
5486 static const struct device_type qeth_osn_devtype = {
5487         .name = "qeth_osn",
5488         .groups = qeth_osn_attr_groups,
5489 };
5490
5491 #define DBF_NAME_LEN    20
5492
5493 struct qeth_dbf_entry {
5494         char dbf_name[DBF_NAME_LEN];
5495         debug_info_t *dbf_info;
5496         struct list_head dbf_list;
5497 };
5498
5499 static LIST_HEAD(qeth_dbf_list);
5500 static DEFINE_MUTEX(qeth_dbf_list_mutex);
5501
5502 static debug_info_t *qeth_get_dbf_entry(char *name)
5503 {
5504         struct qeth_dbf_entry *entry;
5505         debug_info_t *rc = NULL;
5506
5507         mutex_lock(&qeth_dbf_list_mutex);
5508         list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
5509                 if (strcmp(entry->dbf_name, name) == 0) {
5510                         rc = entry->dbf_info;
5511                         break;
5512                 }
5513         }
5514         mutex_unlock(&qeth_dbf_list_mutex);
5515         return rc;
5516 }
5517
5518 static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
5519 {
5520         struct qeth_dbf_entry *new_entry;
5521
5522         card->debug = debug_register(name, 2, 1, 8);
5523         if (!card->debug) {
5524                 QETH_DBF_TEXT(SETUP, 2, "qcdbf");
5525                 goto err;
5526         }
5527         if (debug_register_view(card->debug, &debug_hex_ascii_view))
5528                 goto err_dbg;
5529         new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
5530         if (!new_entry)
5531                 goto err_dbg;
5532         strscpy(new_entry->dbf_name, name, sizeof(new_entry->dbf_name));
5533         new_entry->dbf_info = card->debug;
5534         mutex_lock(&qeth_dbf_list_mutex);
5535         list_add(&new_entry->dbf_list, &qeth_dbf_list);
5536         mutex_unlock(&qeth_dbf_list_mutex);
5537
5538         return 0;
5539
5540 err_dbg:
5541         debug_unregister(card->debug);
5542 err:
5543         return -ENOMEM;
5544 }
5545
5546 static void qeth_clear_dbf_list(void)
5547 {
5548         struct qeth_dbf_entry *entry, *tmp;
5549
5550         mutex_lock(&qeth_dbf_list_mutex);
5551         list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
5552                 list_del(&entry->dbf_list);
5553                 debug_unregister(entry->dbf_info);
5554                 kfree(entry);
5555         }
5556         mutex_unlock(&qeth_dbf_list_mutex);
5557 }
5558
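/* Allocate the net_device matching the card type: hsi%d for HiperSockets
 * (IQD), osn%d for OSN, a plain etherdev otherwise. max_mtu/mtu stay 0 here
 * and are initialized when the device first goes online.
 */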
5559 static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
5560 {
5561         struct net_device *dev;
5562
5563         switch (card->info.type) {
5564         case QETH_CARD_TYPE_IQD:
5565                 dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup);
5566                 break;
5567         case QETH_CARD_TYPE_OSN:
5568                 dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
5569                 break;
5570         default:
5571                 dev = alloc_etherdev(0);
5572         }
5573
5574         if (!dev)
5575                 return NULL;
5576
5577         dev->ml_priv = card;
5578         dev->watchdog_timeo = QETH_TX_TIMEOUT;
5579         dev->min_mtu = IS_OSN(card) ? 64 : 576;
5580         /* initialized when device first goes online: */
5581         dev->max_mtu = 0;
5582         dev->mtu = 0;
5583         SET_NETDEV_DEV(dev, &card->gdev->dev);
5584         netif_carrier_off(dev);
5585
5586         if (IS_OSN(card)) {
5587                 dev->ethtool_ops = &qeth_osn_ethtool_ops;
5588         } else {
5589                 dev->ethtool_ops = &qeth_ethtool_ops;
5590                 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
5591                 dev->hw_features |= NETIF_F_SG;
5592                 dev->vlan_features |= NETIF_F_SG;
5593                 if (IS_IQD(card))
5594                         dev->features |= NETIF_F_SG;
5595         }
5596
5597         return dev;
5598 }
5599
5600 struct net_device *qeth_clone_netdev(struct net_device *orig)
5601 {
5602         struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
5603
5604         if (!clone)
5605                 return NULL;
5606
5607         clone->dev_port = orig->dev_port;
5608         return clone;
5609 }
5610
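/* ccwgroup probe handler: take a reference on the group device, allocate the
 * card, its dbf entry and net_device, and set up an enforced discipline if
 * the card type dictates one.
 */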
5611 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
5612 {
5613         struct qeth_card *card;
5614         struct device *dev;
5615         int rc;
5616         enum qeth_discipline_id enforced_disc;
5617         char dbf_name[DBF_NAME_LEN];
5618
5619         QETH_DBF_TEXT(SETUP, 2, "probedev");
5620
5621         dev = &gdev->dev;
5622         if (!get_device(dev))
5623                 return -ENODEV;
5624
5625         QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
5626
5627         card = qeth_alloc_card(gdev);
5628         if (!card) {
5629                 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
5630                 rc = -ENOMEM;
5631                 goto err_dev;
5632         }
5633
5634         snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
5635                 dev_name(&gdev->dev));
5636         card->debug = qeth_get_dbf_entry(dbf_name);
5637         if (!card->debug) {
5638                 rc = qeth_add_dbf_entry(card, dbf_name);
5639                 if (rc)
5640                         goto err_card;
5641         }
5642
5643         qeth_setup_card(card);
5644         qeth_update_from_chp_desc(card);
5645
5646         card->dev = qeth_alloc_netdev(card);
5647         if (!card->dev) {
5648                 rc = -ENOMEM;
5649                 goto err_card;
5650         }
5651
5652         qeth_determine_capabilities(card);
5653         enforced_disc = qeth_enforce_discipline(card);
5654         switch (enforced_disc) {
5655         case QETH_DISCIPLINE_UNDETERMINED:
5656                 gdev->dev.type = &qeth_generic_devtype;
5657                 break;
5658         default:
5659                 card->info.layer_enforced = true;
5660                 rc = qeth_core_load_discipline(card, enforced_disc);
5661                 if (rc)
5662                         goto err_load;
5663
5664                 gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
5665                                         ? card->discipline->devtype
5666                                         : &qeth_osn_devtype;
5667                 rc = card->discipline->setup(card->gdev);
5668                 if (rc)
5669                         goto err_disc;
5670                 break;
5671         }
5672
5673         return 0;
5674
5675 err_disc:
5676         qeth_core_free_discipline(card);
5677 err_load:
5678         free_netdev(card->dev);
5679 err_card:
5680         qeth_core_free_card(card);
5681 err_dev:
5682         put_device(dev);
5683         return rc;
5684 }
5685
5686 static void qeth_core_remove_device(struct ccwgroup_device *gdev)
5687 {
5688         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5689
5690         QETH_DBF_TEXT(SETUP, 2, "removedv");
5691
5692         if (card->discipline) {
5693                 card->discipline->remove(gdev);
5694                 qeth_core_free_discipline(card);
5695         }
5696
5697         free_netdev(card->dev);
5698         qeth_core_free_card(card);
5699         put_device(&gdev->dev);
5700 }
5701
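/* If no discipline is loaded yet (probe left it undetermined), fall back to
 * layer 3 for IQD cards and layer 2 for everything else before going online.
 */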
5702 static int qeth_core_set_online(struct ccwgroup_device *gdev)
5703 {
5704         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5705         int rc = 0;
5706         enum qeth_discipline_id def_discipline;
5707
5708         if (!card->discipline) {
5709                 if (card->info.type == QETH_CARD_TYPE_IQD)
5710                         def_discipline = QETH_DISCIPLINE_LAYER3;
5711                 else
5712                         def_discipline = QETH_DISCIPLINE_LAYER2;
5713                 rc = qeth_core_load_discipline(card, def_discipline);
5714                 if (rc)
5715                         goto err;
5716                 rc = card->discipline->setup(card->gdev);
5717                 if (rc) {
5718                         qeth_core_free_discipline(card);
5719                         goto err;
5720                 }
5721         }
5722         rc = card->discipline->set_online(gdev);
5723 err:
5724         return rc;
5725 }
5726
5727 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
5728 {
5729         struct qeth_card *card = dev_get_drvdata(&gdev->dev);

5730         return card->discipline->set_offline(gdev);
5731 }
5732
5733 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
5734 {
5735         struct qeth_card *card = dev_get_drvdata(&gdev->dev);

5736         qeth_set_allowed_threads(card, 0, 1);
5737         if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
5738                 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5739         qeth_qdio_clear_card(card, 0);
5740         qeth_clear_qdio_buffers(card);
5741         qdio_free(CARD_DDEV(card));
5742 }
5743
5744 static int qeth_core_freeze(struct ccwgroup_device *gdev)
5745 {
5746         struct qeth_card *card = dev_get_drvdata(&gdev->dev);

5747         if (card->discipline && card->discipline->freeze)
5748                 return card->discipline->freeze(gdev);
5749         return 0;
5750 }
5751
5752 static int qeth_core_thaw(struct ccwgroup_device *gdev)
5753 {
5754         struct qeth_card *card = dev_get_drvdata(&gdev->dev);

5755         if (card->discipline && card->discipline->thaw)
5756                 return card->discipline->thaw(gdev);
5757         return 0;
5758 }
5759
5760 static int qeth_core_restore(struct ccwgroup_device *gdev)
5761 {
5762         struct qeth_card *card = dev_get_drvdata(&gdev->dev);

5763         if (card->discipline && card->discipline->restore)
5764                 return card->discipline->restore(gdev);
5765         return 0;
5766 }
5767
5768 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
5769                            size_t count)
5770 {
5771         int err;
5772
5773         err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
5774                                   buf);
5775
5776         return err ? err : count;
5777 }
5778 static DRIVER_ATTR_WO(group);
5779
5780 static struct attribute *qeth_drv_attrs[] = {
5781         &driver_attr_group.attr,
5782         NULL,
5783 };
5784 static struct attribute_group qeth_drv_attr_group = {
5785         .attrs = qeth_drv_attrs,
5786 };
5787 static const struct attribute_group *qeth_drv_attr_groups[] = {
5788         &qeth_drv_attr_group,
5789         NULL,
5790 };
5791
5792 static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
5793         .driver = {
5794                 .groups = qeth_drv_attr_groups,
5795                 .owner = THIS_MODULE,
5796                 .name = "qeth",
5797         },
5798         .ccw_driver = &qeth_ccw_driver,
5799         .setup = qeth_core_probe_device,
5800         .remove = qeth_core_remove_device,
5801         .set_online = qeth_core_set_online,
5802         .set_offline = qeth_core_set_offline,
5803         .shutdown = qeth_core_shutdown,
5804         .prepare = NULL,
5805         .complete = NULL,
5806         .freeze = qeth_core_freeze,
5807         .thaw = qeth_core_thaw,
5808         .restore = qeth_core_restore,
5809 };
5810
5811 struct qeth_card *qeth_get_card_by_busid(char *bus_id)
5812 {
5813         struct ccwgroup_device *gdev;
5814         struct qeth_card *card;
5815
5816         gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
5817         if (!gdev)
5818                 return NULL;
5819
5820         card = dev_get_drvdata(&gdev->dev);
5821         put_device(&gdev->dev);
5822         return card;
5823 }
5824 EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
5825
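/* Common ioctl handler: SNMP control, card type query, emulated MII access
 * and OAT query; anything else is handed to the discipline's do_ioctl.
 */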
5826 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5827 {
5828         struct qeth_card *card = dev->ml_priv;
5829         struct mii_ioctl_data *mii_data;
5830         int rc = 0;
5831
5832         if (!card)
5833                 return -ENODEV;
5834
5835         switch (cmd) {
5836         case SIOC_QETH_ADP_SET_SNMP_CONTROL:
5837                 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
5838                 break;
5839         case SIOC_QETH_GET_CARD_TYPE:
5840                 if ((card->info.type == QETH_CARD_TYPE_OSD ||
5841                      card->info.type == QETH_CARD_TYPE_OSM ||
5842                      card->info.type == QETH_CARD_TYPE_OSX) &&
5843                     !card->info.guestlan)
5844                         return 1;
5845                 else
5846                         return 0;
5847         case SIOCGMIIPHY:
5848                 mii_data = if_mii(rq);
5849                 mii_data->phy_id = 0;
5850                 break;
5851         case SIOCGMIIREG:
5852                 mii_data = if_mii(rq);
5853                 if (mii_data->phy_id != 0)
5854                         rc = -EINVAL;
5855                 else
5856                         mii_data->val_out = qeth_mdio_read(dev,
5857                                 mii_data->phy_id, mii_data->reg_num);
5858                 break;
5859         case SIOC_QETH_QUERY_OAT:
5860                 rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
5861                 break;
5862         default:
5863                 if (card->discipline->do_ioctl)
5864                         rc = card->discipline->do_ioctl(dev, rq, cmd);
5865                 else
5866                         rc = -EOPNOTSUPP;
5867         }
5868         if (rc)
5869                 QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
5870         return rc;
5871 }
5872 EXPORT_SYMBOL_GPL(qeth_do_ioctl);
5873
5874 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
5875                               unsigned long data)
5876 {
5877         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5878         u32 *features = reply->param;
5879
5880         if (qeth_setassparms_inspect_rc(cmd))
5881                 return -EIO;
5882
5883         *features = cmd->data.setassparms.data.flags_32bit;
5884         return 0;
5885 }
5886
5887 static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
5888                              enum qeth_prot_versions prot)
5889 {
5890         return qeth_send_simple_setassparms_prot(card, cstype,
5891                                                  IPA_CMD_ASS_STOP, 0, prot);
5892 }
5893
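/* Enable a checksum assist in three IPA steps: START to learn the supported
 * flags, verify that TCP/UDP (and, where required, IP header) checksumming
 * is covered, then ENABLE with the required flags and re-check the returned
 * capabilities. Any failure rolls back via qeth_set_csum_off().
 */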
5894 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
5895                             enum qeth_prot_versions prot)
5896 {
5897         u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
5898         struct qeth_cmd_buffer *iob;
5899         struct qeth_ipa_caps caps;
5900         u32 features;
5901         int rc;
5902
5903         /* some L3 HW requires combined L3+L4 csum offload: */
5904         if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
5905             cstype == IPA_OUTBOUND_CHECKSUM)
5906                 required_features |= QETH_IPA_CHECKSUM_IP_HDR;
5907
5908         iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
5909                                        prot);
5910         if (!iob)
5911                 return -ENOMEM;
5912
5913         rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
5914         if (rc)
5915                 return rc;
5916
5917         if ((required_features & features) != required_features) {
5918                 qeth_set_csum_off(card, cstype, prot);
5919                 return -EOPNOTSUPP;
5920         }
5921
5922         iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE, 4,
5923                                        prot);
5924         if (!iob) {
5925                 qeth_set_csum_off(card, cstype, prot);
5926                 return -ENOMEM;
5927         }
5928
5929         if (features & QETH_IPA_CHECKSUM_LP2LP)
5930                 required_features |= QETH_IPA_CHECKSUM_LP2LP;
5931         __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
5932         rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
5933         if (rc) {
5934                 qeth_set_csum_off(card, cstype, prot);
5935                 return rc;
5936         }
5937
5938         if (!qeth_ipa_caps_supported(&caps, required_features) ||
5939             !qeth_ipa_caps_enabled(&caps, required_features)) {
5940                 qeth_set_csum_off(card, cstype, prot);
5941                 return -EOPNOTSUPP;
5942         }
5943
5944         dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
5945                  cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
5946         if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) &&
5947             cstype == IPA_OUTBOUND_CHECKSUM)
5948                 dev_warn(&card->gdev->dev,
5949                          "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
5950                          QETH_CARD_IFNAME(card));
5951         return 0;
5952 }
5953
5954 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
5955                              enum qeth_prot_versions prot)
5956 {
5957         return on ? qeth_set_csum_on(card, cstype, prot) :
5958                     qeth_set_csum_off(card, cstype, prot);
5959 }
5960
5961 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
5962                              unsigned long data)
5963 {
5964         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5965         struct qeth_tso_start_data *tso_data = reply->param;
5966
5967         if (qeth_setassparms_inspect_rc(cmd))
5968                 return -EIO;
5969
5970         tso_data->mss = cmd->data.setassparms.data.tso.mss;
5971         tso_data->supported = cmd->data.setassparms.data.tso.supported;
5972         return 0;
5973 }
5974
5975 static int qeth_set_tso_off(struct qeth_card *card,
5976                             enum qeth_prot_versions prot)
5977 {
5978         return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
5979                                                  IPA_CMD_ASS_STOP, 0, prot);
5980 }
5981
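/* Enable TSO via IPA: START to query MSS and supported modes, then ENABLE
 * with QETH_IPA_LARGE_SEND_TCP and verify the returned capabilities; roll
 * back with qeth_set_tso_off() on any failure.
 */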
5982 static int qeth_set_tso_on(struct qeth_card *card,
5983                            enum qeth_prot_versions prot)
5984 {
5985         struct qeth_tso_start_data tso_data;
5986         struct qeth_cmd_buffer *iob;
5987         struct qeth_ipa_caps caps;
5988         int rc;
5989
5990         iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
5991                                        IPA_CMD_ASS_START, 0, prot);
5992         if (!iob)
5993                 return -ENOMEM;
5994
5995         rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
5996         if (rc)
5997                 return rc;
5998
5999         if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
6000                 qeth_set_tso_off(card, prot);
6001                 return -EOPNOTSUPP;
6002         }
6003
6004         iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6005                                        IPA_CMD_ASS_ENABLE, sizeof(caps), prot);
6006         if (!iob) {
6007                 qeth_set_tso_off(card, prot);
6008                 return -ENOMEM;
6009         }
6010
6011         /* enable TSO capability */
6012         __ipa_cmd(iob)->data.setassparms.data.caps.enabled =
6013                 QETH_IPA_LARGE_SEND_TCP;
6014         rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6015         if (rc) {
6016                 qeth_set_tso_off(card, prot);
6017                 return rc;
6018         }
6019
6020         if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
6021             !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
6022                 qeth_set_tso_off(card, prot);
6023                 return -EOPNOTSUPP;
6024         }
6025
6026         dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
6027                  tso_data.mss);
6028         return 0;
6029 }
6030
6031 static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
6032                             enum qeth_prot_versions prot)
6033 {
6034         return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
6035 }
6036
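/* NETIF_F_RXCSUM is a single feature bit but may map to an IPv4 and an IPv6
 * assist: enabling succeeds if either assist becomes active, disabling fails
 * if any assist is still active.
 */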
6037 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
6038 {
6039         int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
6040         int rc_ipv6;
6041
6042         if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
6043                 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6044                                             QETH_PROT_IPV4);
6045         if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6046                 /* at most the IPv4 Assist is available, so its rc decides */
6047                 return rc_ipv4;
6048
6049         rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6050                                     QETH_PROT_IPV6);
6051
6052         if (on)
6053                 /* enable: success if any Assist is active */
6054                 return (rc_ipv6) ? rc_ipv4 : 0;
6055
6056         /* disable: failure if any Assist is still active */
6057         return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
6058 }
6059
6060 /**
6061  * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6062  * @dev:        a net_device
6063  */
6064 void qeth_enable_hw_features(struct net_device *dev)
6065 {
6066         struct qeth_card *card = dev->ml_priv;
6067         netdev_features_t features;
6068
6069         features = dev->features;
6070         /* force-off any feature that might need an IPA sequence.
6071          * netdev_update_features() will restart them.
6072          */
6073         dev->features &= ~dev->hw_features;
6074         /* toggle VLAN filter, so that VIDs are re-programmed: */
6075         if (IS_LAYER2(card) && IS_VM_NIC(card)) {
6076                 dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
6077                 dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6078         }
6079         netdev_update_features(dev);
6080         if (features != dev->features)
6081                 dev_warn(&card->gdev->dev,
6082                          "Device recovery failed to restore all offload features\n");
6083 }
6084 EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6085
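/* ndo_set_features handler: program each toggled offload bit through its IPA
 * assist and revert the bits that could not be changed.
 */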
6086 int qeth_set_features(struct net_device *dev, netdev_features_t features)
6087 {
6088         struct qeth_card *card = dev->ml_priv;
6089         netdev_features_t changed = dev->features ^ features;
6090         int rc = 0;
6091
6092         QETH_DBF_TEXT(SETUP, 2, "setfeat");
6093         QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
6094
6095         if ((changed & NETIF_F_IP_CSUM)) {
6096                 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6097                                        IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
6098                 if (rc)
6099                         changed ^= NETIF_F_IP_CSUM;
6100         }
6101         if (changed & NETIF_F_IPV6_CSUM) {
6102                 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6103                                        IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
6104                 if (rc)
6105                         changed ^= NETIF_F_IPV6_CSUM;
6106         }
6107         if (changed & NETIF_F_RXCSUM) {
6108                 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
6109                 if (rc)
6110                         changed ^= NETIF_F_RXCSUM;
6111         }
6112         if (changed & NETIF_F_TSO) {
6113                 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
6114                                       QETH_PROT_IPV4);
6115                 if (rc)
6116                         changed ^= NETIF_F_TSO;
6117         }
6118         if (changed & NETIF_F_TSO6) {
6119                 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
6120                                       QETH_PROT_IPV6);
6121                 if (rc)
6122                         changed ^= NETIF_F_TSO6;
6123         }
6124
6125         /* everything changed successfully? */
6126         if ((dev->features ^ features) == changed)
6127                 return 0;
6128         /* something went wrong. save changed features and return error */
6129         dev->features ^= changed;
6130         return -EIO;
6131 }
6132 EXPORT_SYMBOL_GPL(qeth_set_features);
6133
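/* ndo_fix_features handler: mask out offloads that the card's IPA assists do
 * not support.
 */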
6134 netdev_features_t qeth_fix_features(struct net_device *dev,
6135                                     netdev_features_t features)
6136 {
6137         struct qeth_card *card = dev->ml_priv;
6138
6139         QETH_DBF_TEXT(SETUP, 2, "fixfeat");
6140         if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
6141                 features &= ~NETIF_F_IP_CSUM;
6142         if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
6143                 features &= ~NETIF_F_IPV6_CSUM;
6144         if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
6145             !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6146                 features &= ~NETIF_F_RXCSUM;
6147         if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
6148                 features &= ~NETIF_F_TSO;
6149         if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
6150                 features &= ~NETIF_F_TSO6;
6151
6152         QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
6153         return features;
6154 }
6155 EXPORT_SYMBOL_GPL(qeth_fix_features);
6156
6157 netdev_features_t qeth_features_check(struct sk_buff *skb,
6158                                       struct net_device *dev,
6159                                       netdev_features_t features)
6160 {
6161         /* GSO segmentation builds skbs with
6162          *      a (small) linear part for the headers, and
6163          *      page frags for the data.
6164          * Compared to a linear skb, the header-only part consumes an
6165          * additional buffer element. This reduces buffer utilization, and
6166          * hurts throughput. So compress small segments into one element.
6167          */
6168         if (netif_needs_gso(skb, features)) {
6169                 /* match skb_segment(): */
6170                 unsigned int doffset = skb->data - skb_mac_header(skb);
6171                 unsigned int hsize = skb_shinfo(skb)->gso_size;
6172                 unsigned int hroom = skb_headroom(skb);
6173
6174                 /* linearize only if resulting skb allocations are order-0: */
6175                 if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
6176                         features &= ~NETIF_F_SG;
6177         }
6178
6179         return vlan_features_check(skb, features);
6180 }
6181 EXPORT_SYMBOL_GPL(qeth_features_check);
6182
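/* Fill rtnl_link_stats64 from the per-card RX counters and the per-TX-queue
 * counters.
 */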
6183 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6184 {
6185         struct qeth_card *card = dev->ml_priv;
6186         struct qeth_qdio_out_q *queue;
6187         unsigned int i;
6188
6189         QETH_CARD_TEXT(card, 5, "getstat");
6190
6191         stats->rx_packets = card->stats.rx_packets;
6192         stats->rx_bytes = card->stats.rx_bytes;
6193         stats->rx_errors = card->stats.rx_errors;
6194         stats->rx_dropped = card->stats.rx_dropped;
6195         stats->multicast = card->stats.rx_multicast;
6196         stats->tx_errors = card->stats.tx_errors;
6197
6198         for (i = 0; i < card->qdio.no_out_queues; i++) {
6199                 queue = card->qdio.out_qs[i];
6200
6201                 stats->tx_packets += queue->stats.tx_packets;
6202                 stats->tx_bytes += queue->stats.tx_bytes;
6203                 stats->tx_errors += queue->stats.tx_errors;
6204                 stats->tx_dropped += queue->stats.tx_dropped;
6205         }
6206 }
6207 EXPORT_SYMBOL_GPL(qeth_get_stats64);
6208
6209 int qeth_open(struct net_device *dev)
6210 {
6211         struct qeth_card *card = dev->ml_priv;
6212
6213         QETH_CARD_TEXT(card, 4, "qethopen");
6214
6215         if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
6216                 return -EIO;
6217
6218         card->data.state = CH_STATE_UP;
6219         netif_start_queue(dev);
6220
6221         napi_enable(&card->napi);
6222         local_bh_disable();
6223         napi_schedule(&card->napi);
6224         /* kick-start the NAPI softirq: */
6225         local_bh_enable();
6226         return 0;
6227 }
6228 EXPORT_SYMBOL_GPL(qeth_open);
6229
6230 int qeth_stop(struct net_device *dev)
6231 {
6232         struct qeth_card *card = dev->ml_priv;
6233
6234         QETH_CARD_TEXT(card, 4, "qethstop");
6235         netif_tx_disable(dev);
6236         napi_disable(&card->napi);
6237         return 0;
6238 }
6239 EXPORT_SYMBOL_GPL(qeth_stop);
6240
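/* Module init: register the dbf views, the qeth root device, the header and
 * outbuf slab caches and the ccw/ccwgroup drivers; unwind in reverse order
 * on any failure.
 */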
6241 static int __init qeth_core_init(void)
6242 {
6243         int rc;
6244
6245         pr_info("loading core functions\n");
6246
6247         rc = qeth_register_dbf_views();
6248         if (rc)
6249                 goto dbf_err;
6250         qeth_core_root_dev = root_device_register("qeth");
6251         rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
6252         if (rc)
6253                 goto register_err;
6254         qeth_core_header_cache =
6255                 kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
6256                                   roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
6257                                   0, NULL);
6258         if (!qeth_core_header_cache) {
6259                 rc = -ENOMEM;
6260                 goto slab_err;
6261         }
6262         qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
6263                         sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
6264         if (!qeth_qdio_outbuf_cache) {
6265                 rc = -ENOMEM;
6266                 goto cqslab_err;
6267         }
6268         rc = ccw_driver_register(&qeth_ccw_driver);
6269         if (rc)
6270                 goto ccw_err;
6271         rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
6272         if (rc)
6273                 goto ccwgroup_err;
6274
6275         return 0;
6276
6277 ccwgroup_err:
6278         ccw_driver_unregister(&qeth_ccw_driver);
6279 ccw_err:
6280         kmem_cache_destroy(qeth_qdio_outbuf_cache);
6281 cqslab_err:
6282         kmem_cache_destroy(qeth_core_header_cache);
6283 slab_err:
6284         root_device_unregister(qeth_core_root_dev);
6285 register_err:
6286         qeth_unregister_dbf_views();
6287 dbf_err:
6288         pr_err("Initializing the qeth device driver failed\n");
6289         return rc;
6290 }
6291
6292 static void __exit qeth_core_exit(void)
6293 {
6294         qeth_clear_dbf_list();
6295         ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
6296         ccw_driver_unregister(&qeth_ccw_driver);
6297         kmem_cache_destroy(qeth_qdio_outbuf_cache);
6298         kmem_cache_destroy(qeth_core_header_cache);
6299         root_device_unregister(qeth_core_root_dev);
6300         qeth_unregister_dbf_views();
6301         pr_info("core functions removed\n");
6302 }
6303
6304 module_init(qeth_core_init);
6305 module_exit(qeth_core_exit);
6306 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
6307 MODULE_DESCRIPTION("qeth core functions");
6308 MODULE_LICENSE("GPL");