/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 *  Written by: Atul Gupta (atul.gupta@chelsio.com)
 *  Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)

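/* Allocate a free MSI-X vector index from the ULD bitmap.  The bitmap is
 * shared by all ULD queues, so updates are done under the bitmap lock.
 * Returns the index on success or -ENOSPC when all vectors are in use;
 * free_msix_idx_in_bmap() below releases an index again.
 */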
static int get_msix_idx_from_bmap(struct adapter *adap)
{
        struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
        unsigned long flags;
        unsigned int msix_idx;

        spin_lock_irqsave(&bmap->lock, flags);
        msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
        if (msix_idx < bmap->mapsize) {
                __set_bit(msix_idx, bmap->msix_bmap);
        } else {
                spin_unlock_irqrestore(&bmap->lock, flags);
                return -ENOSPC;
        }

        spin_unlock_irqrestore(&bmap->lock, flags);
        return msix_idx;
}

static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
        struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
        unsigned long flags;

        spin_lock_irqsave(&bmap->lock, flags);
        __clear_bit(msix_idx, bmap->msix_bmap);
        spin_unlock_irqrestore(&bmap->lock, flags);
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
        struct adapter *adap = q->adap;

        if (adap->uld[q->uld].lro_flush)
                adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 *      uldrx_handler - response queue handler for ULD queues
 *      @q: the response queue that received the packet
 *      @rsp: the response queue descriptor holding the offload message
 *      @gl: the gather list of packet fragments
 *
 *      Deliver an ingress offload packet to a ULD.  All processing is done by
 *      the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct adapter *adap = q->adap;
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
        int ret;

        /* FW can send CPLs encapsulated in a CPL_FW4_MSG */
        if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
                rsp += 2;

        if (q->flush_handler)
                ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
                                rsp, gl, &q->lro_mgr,
                                &q->napi);
        else
                ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
                                rsp, gl);

        if (ret) {
                rxq->stats.nomem++;
                return -1;
        }

        if (!gl)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

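/* Allocate all RX queues for a ULD: the ordinary queues first, followed by
 * any concentrator (CIQ) queues, spread evenly across the adapter's ports.
 * Under MSI-X each queue also claims a vector from the ULD bitmap.  On
 * failure, everything allocated so far is freed again.
 */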
static int alloc_uld_rxqs(struct adapter *adap,
                          struct sge_uld_rxq_info *rxq_info, bool lro)
{
        struct sge *s = &adap->sge;
        unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
        struct sge_ofld_rxq *q = rxq_info->uldrxq;
        unsigned short *ids = rxq_info->rspq_id;
        unsigned int bmap_idx = 0;
        unsigned int per_chan;
        int i, err, msi_idx, que_idx = 0;

        per_chan = rxq_info->nrxq / adap->params.nports;

        if (adap->flags & USING_MSIX)
                msi_idx = 1;
        else
                msi_idx = -((int)s->intrq.abs_id + 1);

        for (i = 0; i < nq; i++, q++) {
                if (i == rxq_info->nrxq) {
                        /* start allocation of concentrator queues */
                        per_chan = rxq_info->nciq / adap->params.nports;
                        que_idx = 0;
                }

                if (msi_idx >= 0) {
                        /* claim a free MSI-X vector; bail out if exhausted */
                        err = get_msix_idx_from_bmap(adap);
                        if (err < 0)
                                goto freeout;
                        bmap_idx = err;
                        msi_idx = adap->msix_info_ulds[bmap_idx].idx;
                }
                err = t4_sge_alloc_rxq(adap, &q->rspq, false,
                                       adap->port[que_idx++ / per_chan],
                                       msi_idx,
                                       q->fl.size ? &q->fl : NULL,
                                       uldrx_handler,
                                       lro ? uldrx_flush_handler : NULL,
                                       0);
                if (err)
                        goto freeout;
                if (msi_idx >= 0)
                        rxq_info->msix_tbl[i] = bmap_idx;
                memset(&q->stats, 0, sizeof(q->stats));
                if (ids)
                        ids[i] = q->rspq.abs_id;
        }
        return 0;
freeout:
        q = rxq_info->uldrxq;
        for ( ; i; i--, q++) {
                if (q->rspq.desc)
                        free_rspq_fl(adap, &q->rspq,
                                     q->fl.size ? &q->fl : NULL);
        }
        return err;
}

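/* Allocate the ULD RX queues (and, under MSI-X, the table mapping queue
 * index to MSI-X bitmap slot).  For the RDMA ULD, additionally tell the
 * firmware to route control queue completions to the RDMA response queues.
 */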
static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int i, ret = 0;

        if (adap->flags & USING_MSIX) {
                rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
                                             sizeof(unsigned short),
                                             GFP_KERNEL);
                if (!rxq_info->msix_tbl)
                        return -ENOMEM;
        }

        ret = alloc_uld_rxqs(adap, rxq_info, lro);
        if (ret) {
                kfree(rxq_info->msix_tbl);
                rxq_info->msix_tbl = NULL;
                return ret;
        }

        /* Tell uP to route control queue completions to rdma rspq */
        if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
                struct sge *s = &adap->sge;
                unsigned int cmplqid;
                u32 param, cmdop;

                cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
                for_each_port(adap, i) {
                        cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
                        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                                 FW_PARAMS_PARAM_X_V(cmdop) |
                                 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
                        ret = t4_set_params(adap, adap->mbox, adap->pf,
                                            0, 1, &param, &cmplqid);
                }
        }
        return ret;
}

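/* Free 'n' consecutive ULD response queues and their free lists. */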
static void t4_free_uld_rxqs(struct adapter *adap, int n,
                             struct sge_ofld_rxq *q)
{
        for ( ; n; n--, q++) {
                if (q->rspq.desc)
                        free_rspq_fl(adap, &q->rspq,
                                     q->fl.size ? &q->fl : NULL);
        }
}

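/* Undo setup_sge_queues_uld(): for the RDMA ULD, first reset the control
 * queue completion routing, then free the RX queues and the MSI-X table.
 */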
static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

        if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
                struct sge *s = &adap->sge;
                u32 param, cmdop, cmplqid = 0;
                int i;

                cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
                for_each_port(adap, i) {
                        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                                 FW_PARAMS_PARAM_X_V(cmdop) |
                                 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
                        t4_set_params(adap, adap->mbox, adap->pf,
                                      0, 1, &param, &cmplqid);
                }
        }

        if (rxq_info->nciq)
                t4_free_uld_rxqs(adap, rxq_info->nciq,
                                 rxq_info->uldrxq + rxq_info->nrxq);
        t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
        if (adap->flags & USING_MSIX)
                kfree(rxq_info->msix_tbl);
}

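/* Work out how many RX (and concentrator) queues a ULD gets, bounded by the
 * per-ULD queue budget and the number of online CPUs, rounded to a multiple
 * of the port count, and allocate the bookkeeping for them.
 */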
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
                          const struct cxgb4_uld_info *uld_info)
{
        struct sge *s = &adap->sge;
        struct sge_uld_rxq_info *rxq_info;
        int i, nrxq, ciq_size;

        rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
        if (!rxq_info)
                return -ENOMEM;

        if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
                i = s->nqs_per_uld;
                rxq_info->nrxq = roundup(i, adap->params.nports);
        } else {
                i = min_t(int, uld_info->nrxq,
                          num_online_cpus());
                rxq_info->nrxq = roundup(i, adap->params.nports);
        }
        if (!uld_info->ciq) {
                rxq_info->nciq = 0;
        } else {
                if (adap->flags & USING_MSIX)
                        rxq_info->nciq = min_t(int, s->nqs_per_uld,
                                               num_online_cpus());
                else
                        rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
                                               num_online_cpus());
                rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
                                  adap->params.nports);
                rxq_info->nciq = max_t(int, rxq_info->nciq,
                                       adap->params.nports);
        }

        nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
        rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
                                   GFP_KERNEL);
        if (!rxq_info->uldrxq) {
                kfree(rxq_info);
                return -ENOMEM;
        }

        rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
        if (!rxq_info->rspq_id) {
                kfree(rxq_info->uldrxq);
                kfree(rxq_info);
                return -ENOMEM;
        }

        for (i = 0; i < rxq_info->nrxq; i++) {
                struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

                init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
                r->rspq.uld = uld_type;
                r->fl.size = 72;
        }

        ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
        if (ciq_size > SGE_MAX_IQ_SIZE) {
                dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
                ciq_size = SGE_MAX_IQ_SIZE;
        }

        for (i = rxq_info->nrxq; i < nrxq; i++) {
                struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

                init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
                r->rspq.uld = uld_type;
        }

        memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
        adap->sge.uld_rxq_info[uld_type] = rxq_info;

        return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

        adap->sge.uld_rxq_info[uld_type] = NULL;
        kfree(rxq_info->rspq_id);
        kfree(rxq_info->uldrxq);
        kfree(rxq_info);
}

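/* Request one MSI-X interrupt per ULD RX queue, unwinding any IRQs (and
 * their bitmap slots) already requested if one of them fails.
 */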
static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int err = 0;
        unsigned int idx, bmap_idx;

        for_each_uldrxq(rxq_info, idx) {
                bmap_idx = rxq_info->msix_tbl[idx];
                err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info_ulds[bmap_idx].desc,
                                  &rxq_info->uldrxq[idx].rspq);
                if (err)
                        goto unwind;
        }
        return 0;
unwind:
        while (idx-- > 0) {
                bmap_idx = rxq_info->msix_tbl[idx];
                free_msix_idx_in_bmap(adap, bmap_idx);
                free_irq(adap->msix_info_ulds[bmap_idx].vec,
                         &rxq_info->uldrxq[idx].rspq);
        }
        return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        unsigned int idx, bmap_idx;

        for_each_uldrxq(rxq_info, idx) {
                bmap_idx = rxq_info->msix_tbl[idx];

                free_msix_idx_in_bmap(adap, bmap_idx);
                free_irq(adap->msix_info_ulds[bmap_idx].vec,
                         &rxq_info->uldrxq[idx].rspq);
        }
}

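/* Build the human-readable names shown in /proc/interrupts for the ULD
 * MSI-X vectors: "<port0 name>-<uld name><queue index>".
 */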
static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int n = sizeof(adap->msix_info_ulds[0].desc);
        unsigned int idx, bmap_idx;

        for_each_uldrxq(rxq_info, idx) {
                bmap_idx = rxq_info->msix_tbl[idx];

                snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
                         adap->port[0]->name, rxq_info->name, idx);
        }
}

static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
        if (!q)
                return;

        if (q->handler)
                napi_enable(&q->napi);

        /* 0-increment GTS to start the timer and enable interrupts */
        t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
                     SEINTARM_V(q->intr_params) |
                     INGRESSQID_V(q->cntxt_id));
}

static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
        if (q && q->handler)
                napi_disable(&q->napi);
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int idx;

        for_each_uldrxq(rxq_info, idx)
                enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int idx;

        for_each_uldrxq(rxq_info, idx)
                quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

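/* Tear down all TX queues of a ULD: kill the restart tasklets, free the
 * hardware egress queues and return the host memory backing them.
 */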
static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
        int nq = txq_info->ntxq;
        int i;

        for (i = 0; i < nq; i++) {
                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                if (txq && txq->q.desc) {
                        tasklet_kill(&txq->qresume_tsk);
                        t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
                                        txq->q.cntxt_id);
                        free_tx_desc(adap, &txq->q, txq->q.in_use, false);
                        kfree(txq->q.sdesc);
                        __skb_queue_purge(&txq->sendq);
                        free_txq(adap, &txq->q);
                }
        }
}

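/* Allocate the ULD TX queues, distributing them evenly across ports. */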
static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
                  unsigned int uld_type)
{
        struct sge *s = &adap->sge;
        int nq = txq_info->ntxq;
        int i, j, err;

        j = nq / adap->params.nports;
        for (i = 0; i < nq; i++) {
                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                txq->q.size = 1024;
                err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
                                           s->fw_evtq.cntxt_id, uld_type);
                if (err)
                        goto freeout;
        }
        return 0;
freeout:
        free_sge_txq_uld(adap, txq_info);
        return err;
}

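/* Drop a reference on the (possibly shared) TX queue set of a ULD and free
 * it once the last user is gone.
 */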
static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_txq_info *txq_info = NULL;
        int tx_uld_type = TX_ULD(uld_type);

        txq_info = adap->sge.uld_txq_info[tx_uld_type];

        if (txq_info && atomic_dec_and_test(&txq_info->users)) {
                free_sge_txq_uld(adap, txq_info);
                kfree(txq_info->uldtxq);
                kfree(txq_info);
                adap->sge.uld_txq_info[tx_uld_type] = NULL;
        }
}

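/* Set up the TX queue set for a ULD.  Offload TX queues are shared between
 * the offload ULDs, so an existing set is reused and reference counted; the
 * crypto ULD gets its own set, sized from its firmware-provided resources.
 */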
static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
                  const struct cxgb4_uld_info *uld_info)
{
        struct sge_uld_txq_info *txq_info = NULL;
        int tx_uld_type, i;

        tx_uld_type = TX_ULD(uld_type);
        txq_info = adap->sge.uld_txq_info[tx_uld_type];

        if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
            (atomic_inc_return(&txq_info->users) > 1))
                return 0;

        txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
        if (!txq_info)
                return -ENOMEM;
        if (uld_type == CXGB4_ULD_CRYPTO) {
                i = min_t(int, adap->vres.ncrypto_fc,
                          num_online_cpus());
                txq_info->ntxq = rounddown(i, adap->params.nports);
                if (txq_info->ntxq <= 0) {
                        dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
                        kfree(txq_info);
                        return -EINVAL;
                }
        } else {
                i = min_t(int, uld_info->ntxq, num_online_cpus());
                txq_info->ntxq = roundup(i, adap->params.nports);
        }
        txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
                                   GFP_KERNEL);
        if (!txq_info->uldtxq) {
                kfree(txq_info);
                return -ENOMEM;
        }

        if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
                kfree(txq_info->uldtxq);
                kfree(txq_info);
                return -ENOMEM;
        }

        atomic_inc(&txq_info->users);
        adap->sge.uld_txq_info[tx_uld_type] = txq_info;
        return 0;
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
                           struct cxgb4_lld_info *lli)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int tx_uld_type = TX_ULD(uld_type);
        struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];

        lli->rxq_ids = rxq_info->rspq_id;
        lli->nrxq = rxq_info->nrxq;
        lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
        lli->nciq = rxq_info->nciq;
        lli->ntxq = txq_info->ntxq;
}

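/* Allocate the per-adapter ULD bookkeeping: the ULD info array and the
 * RX/TX queue info pointer tables.  Called from the main driver during
 * adapter initialization.
 */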
int t4_uld_mem_alloc(struct adapter *adap)
{
        struct sge *s = &adap->sge;

        adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
        if (!adap->uld)
                return -ENOMEM;

        s->uld_rxq_info = kcalloc(CXGB4_ULD_MAX,
                                  sizeof(struct sge_uld_rxq_info *),
                                  GFP_KERNEL);
        if (!s->uld_rxq_info)
                goto err_uld;

        s->uld_txq_info = kcalloc(CXGB4_TX_MAX,
                                  sizeof(struct sge_uld_txq_info *),
                                  GFP_KERNEL);
        if (!s->uld_txq_info)
                goto err_uld_rx;
        return 0;

err_uld_rx:
        kfree(s->uld_rxq_info);
err_uld:
        kfree(adap->uld);
        return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
        struct sge *s = &adap->sge;

        kfree(s->uld_txq_info);
        kfree(s->uld_rxq_info);
        kfree(adap->uld);
}

/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
        if (adap->uld[type].handle) {
                adap->uld[type].handle = NULL;
                adap->uld[type].add = NULL;
                release_sge_txq_uld(adap, type);

                if (adap->flags & FULL_INIT_DONE)
                        quiesce_rx_uld(adap, type);

                if (adap->flags & USING_MSIX)
                        free_msix_queue_irqs_uld(adap, type);

                free_sge_queues_uld(adap, type);
                free_queues_uld(adap, type);
        }
}

void t4_uld_clean_up(struct adapter *adap)
{
        unsigned int i;

        mutex_lock(&uld_mutex);
        for (i = 0; i < CXGB4_ULD_MAX; i++) {
                if (!adap->uld[i].handle)
                        continue;

                cxgb4_shutdown_uld_adapter(adap, i);
        }
        mutex_unlock(&uld_mutex);
}

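/* Take a snapshot of adapter capabilities and resources into the lld_info
 * structure that is handed to a ULD when it attaches.
 */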
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
        int i;

        lld->pdev = adap->pdev;
        lld->pf = adap->pf;
        lld->l2t = adap->l2t;
        lld->tids = &adap->tids;
        lld->ports = adap->port;
        lld->vr = &adap->vres;
        lld->mtus = adap->params.mtus;
        lld->nchan = adap->params.nports;
        lld->nports = adap->params.nports;
        lld->wr_cred = adap->params.ofldq_wr_cred;
        lld->crypto = adap->params.crypto;
        lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
        lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
        lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
        lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
        lld->iscsi_ppm = &adap->iscsi_ppm;
        lld->adapter_type = adap->params.chip;
        lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
        lld->udb_density = 1 << adap->params.sge.eq_qpp;
        lld->ucq_density = 1 << adap->params.sge.iq_qpp;
        lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
        lld->filt_mode = adap->params.tp.vlan_pri_map;
        /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
        for (i = 0; i < NCHAN; i++)
                lld->tx_modq[i] = i;
        lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
        lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
        lld->fw_vers = adap->params.fw_vers;
        lld->dbfifo_int_thresh = dbfifo_int_thresh;
        lld->sge_ingpadboundary = adap->sge.fl_align;
        lld->sge_egrstatuspagesize = adap->sge.stat_len;
        lld->sge_pktshift = adap->sge.pktshift;
        lld->ulp_crypto = adap->params.crypto;
        lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
        lld->max_ordird_qp = adap->params.max_ordird_qp;
        lld->max_ird_adapter = adap->params.max_ird_adapter;
        lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
        lld->nodeid = dev_to_node(adap->pdev_dev);
        lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
        lld->write_w_imm_support = adap->params.write_w_imm_support;
        lld->write_cmpl_support = adap->params.write_cmpl_support;
}

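/* Hand a newly configured adapter to a ULD via its add() method and, if the
 * adapter is already up, signal CXGB4_STATE_UP through state_change().
 */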
static void uld_attach(struct adapter *adap, unsigned int uld)
{
        void *handle;
        struct cxgb4_lld_info lli;

        uld_init(adap, &lli);
        uld_queue_init(adap, uld, &lli);

        handle = adap->uld[uld].add(&lli);
        if (IS_ERR(handle)) {
                dev_warn(adap->pdev_dev,
                         "could not attach to the %s driver, error %ld\n",
                         adap->uld[uld].name, PTR_ERR(handle));
                return;
        }

        adap->uld[uld].handle = handle;
        t4_register_netevent_notifier();

        if (adap->flags & FULL_INIT_DONE)
                adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}

/**
 *      cxgb4_register_uld - register an upper-layer driver
 *      @type: the ULD type
 *      @p: the ULD methods
 *
 *      Registers an upper-layer driver with this driver and notifies the ULD
 *      about any presently available devices that support its type.  If
 *      setup fails on an adapter, a warning is logged and that adapter is
 *      skipped; registration proceeds on the remaining adapters.
 */
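/* Illustrative sketch of the calling convention (my_uld_info, my_add and
 * my_rx_handler are hypothetical names; the real users are the iSCSI,
 * iWARP/RDMA and crypto ULD modules):
 *
 *      static struct cxgb4_uld_info my_uld_info = {
 *              .name = "myuld",
 *              .add = my_add,
 *              .rx_handler = my_rx_handler,
 *      };
 *
 *      cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
 *      ...
 *      cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
 */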
void cxgb4_register_uld(enum cxgb4_uld type,
                        const struct cxgb4_uld_info *p)
{
        int ret = 0;
        struct adapter *adap;

        if (type >= CXGB4_ULD_MAX)
                return;

        mutex_lock(&uld_mutex);
        list_for_each_entry(adap, &adapter_list, list_node) {
                if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
                    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
                        continue;
                if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
                        continue;
                ret = cfg_queues_uld(adap, type, p);
                if (ret)
                        goto out;
                ret = setup_sge_queues_uld(adap, type, p->lro);
                if (ret)
                        goto free_queues;
                if (adap->flags & USING_MSIX) {
                        name_msix_vecs_uld(adap, type);
                        ret = request_msix_queue_irqs_uld(adap, type);
                        if (ret)
                                goto free_rxq;
                }
                if (adap->flags & FULL_INIT_DONE)
                        enable_rx_uld(adap, type);
                if (adap->uld[type].add)
                        goto free_irq;
                ret = setup_sge_txq_uld(adap, type, p);
                if (ret)
                        goto free_irq;
                adap->uld[type] = *p;
                uld_attach(adap, type);
                continue;
free_irq:
                if (adap->flags & FULL_INIT_DONE)
                        quiesce_rx_uld(adap, type);
                if (adap->flags & USING_MSIX)
                        free_msix_queue_irqs_uld(adap, type);
free_rxq:
                free_sge_queues_uld(adap, type);
free_queues:
                free_queues_uld(adap, type);
out:
                dev_warn(adap->pdev_dev,
                         "ULD registration failed for uld type %d\n", type);
        }
        mutex_unlock(&uld_mutex);
}
EXPORT_SYMBOL(cxgb4_register_uld);

/**
 *      cxgb4_unregister_uld - unregister an upper-layer driver
 *      @type: the ULD type
 *
 *      Unregisters an existing upper-layer driver.  Returns 0 on success or
 *      %-EINVAL if @type is out of range.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
        struct adapter *adap;

        if (type >= CXGB4_ULD_MAX)
                return -EINVAL;

        mutex_lock(&uld_mutex);
        list_for_each_entry(adap, &adapter_list, list_node) {
                if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
                    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
                        continue;
                if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
                        continue;

                cxgb4_shutdown_uld_adapter(adap, type);
        }
        mutex_unlock(&uld_mutex);

        return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);