/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif
#include <rdma/rdma_vt.h>

#include "qib.h"
#include "qib_common.h"
#include "qib_mad.h"
#ifdef CONFIG_DEBUG_FS
#include "qib_debugfs.h"
#include "qib_verbs.h"
#endif

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt

/*
 * min buffers we want to have per context, after driver
 */
#define QIB_MIN_USER_CTXT_BUFCNT 7

#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)

/*
 * Number of ctxts we are configured to use (to allow for more pio
 * buffers per ctxt, etc.)  Zero means use chip value.
 */
ushort qib_cfgctxts;
module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");

unsigned qib_numa_aware;
module_param_named(numa_aware, qib_numa_aware, uint, S_IRUGO);
MODULE_PARM_DESC(numa_aware,
	"0 -> PSM allocation close to HCA, 1 -> PSM allocation local to process");

/*
 * If set, do not write to any regs if avoidable, hack to allow
 * check for deranged default register values.
 */
ushort qib_mini_init;
module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");

unsigned qib_n_krcv_queues;
module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");

unsigned qib_cc_table_size;
module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");

static void verify_interrupt(unsigned long);

static struct idr qib_unit_table;
u32 qib_cpulist_count;
unsigned long *qib_cpulist;

/* set number of contexts we'll actually use */
void qib_set_ctxtcnt(struct qib_devdata *dd)
{
	if (!qib_cfgctxts) {
		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
		if (dd->cfgctxts > dd->ctxtcnt)
			dd->cfgctxts = dd->ctxtcnt;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->cfgctxts = dd->ctxtcnt;
	else if (qib_cfgctxts <= dd->ctxtcnt)
		dd->cfgctxts = qib_cfgctxts;
	else
		dd->cfgctxts = dd->ctxtcnt;
	dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
		dd->cfgctxts - dd->first_user_ctxt;
}

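/*
 * Worked example (hypothetical chip values): with qib_cfgctxts == 0,
 * first_user_ctxt == 2, ctxtcnt == 18 and 8 CPUs online, cfgctxts
 * becomes min(2 + 8, 18) == 10, and freectxts = 10 - 2 = 8 contexts
 * are left for user (PSM) processes.
 */
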
/*
 * Common code for creating the receive context array.
 */
int qib_create_ctxts(struct qib_devdata *dd)
{
	unsigned i;
	int local_node_id = pcibus_to_node(dd->pcidev->bus);

	if (local_node_id < 0)
		local_node_id = numa_node_id();
	dd->assigned_node_id = local_node_id;

	/*
	 * Allocate full ctxtcnt array, rather than just cfgctxts, because
	 * cleanup iterates across all possible ctxts.
	 */
	dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
	if (!dd->rcd)
		return -ENOMEM;

	/* create (one or more) kctxt */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		struct qib_pportdata *ppd;
		struct qib_ctxtdata *rcd;

		if (dd->skip_kctxt_mask & (1 << i))
			continue;

		ppd = dd->pport + (i % dd->num_pports);

		rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id);
		if (!rcd) {
			qib_dev_err(dd,
				"Unable to allocate ctxtdata for Kernel ctxt, failing\n");
			kfree(dd->rcd);
			dd->rcd = NULL;
			return -ENOMEM;
		}
		rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
		rcd->seq_cnt = 1;
	}
	return 0;
}

/*
 * Common code for user and kernel context setup.
 */
struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt,
	int node_id)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;

	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, node_id);
	if (rcd) {
		INIT_LIST_HEAD(&rcd->qp_wait_list);
		rcd->node_id = node_id;
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->cnt = 1;
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
#ifdef CONFIG_DEBUG_FS
		if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
				GFP_KERNEL, node_id);
			if (!rcd->opstats) {
				kfree(rcd);
				qib_dev_err(dd,
					"Unable to allocate per ctxt stats buffer\n");
				return NULL;
			}
		}
#endif
		dd->f_init_ctxt(rcd);

		/*
		 * To avoid wasting a lot of memory, we allocate 32KB chunks
		 * of physically contiguous memory, advance through it until
		 * used up and then allocate more.  Of course, we need
		 * memory to store those extra pointers, now.  32KB seems to
		 * be the most that is "safe" under memory pressure
		 * (creating large files and then copying them over
		 * NFS while doing lots of MPI jobs).  The OOM killer can
		 * get invoked, even though we say we can sleep, and this
		 * can cause significant system problems....
		 */
		rcd->rcvegrbuf_size = 0x8000;
		rcd->rcvegrbufs_perchunk =
			rcd->rcvegrbuf_size / dd->rcvegrbufsize;
		rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
			rcd->rcvegrbufs_perchunk - 1) /
			rcd->rcvegrbufs_perchunk;
		BUG_ON(!is_power_of_2(rcd->rcvegrbufs_perchunk));
		rcd->rcvegrbufs_perchunk_shift =
			ilog2(rcd->rcvegrbufs_perchunk);
	}
	return rcd;
}

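/*
 * Chunking arithmetic, with illustrative (not chip-specific) sizes:
 * for a 4KB rcvegrbufsize, each 32KB chunk holds 32768 / 4096 = 8
 * buffers (perchunk_shift = ilog2(8) = 3), so an rcvegrcnt of 2048
 * rounds up to (2048 + 7) / 8 = 256 chunks.
 */
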
/*
 * Common code for initializing the physical port structure.
 */
int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
			u8 hw_pidx, u8 port)
{
	int size;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	spin_lock_init(&ppd->sdma_lock);
	spin_lock_init(&ppd->lflags_lock);
	spin_lock_init(&ppd->cc_shadow_lock);
	init_waitqueue_head(&ppd->state_wait);

	setup_timer(&ppd->symerr_clear_timer, qib_clear_symerror_on_linkup,
		    (unsigned long)ppd);

	ppd->qib_wq = NULL;
	ppd->ibport_data.pmastats =
		alloc_percpu(struct qib_pma_counters);
	if (!ppd->ibport_data.pmastats)
		return -ENOMEM;
	ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
	ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
	ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
	if (!(ppd->ibport_data.rvp.rc_acks) ||
	    !(ppd->ibport_data.rvp.rc_qacks) ||
	    !(ppd->ibport_data.rvp.rc_delayed_comp))
		return -ENOMEM;

	if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
		goto bail;

	ppd->cc_supported_table_entries = min(max_t(int, qib_cc_table_size,
		IB_CCT_MIN_ENTRIES), IB_CCT_ENTRIES*IB_CC_TABLE_CAP_DEFAULT);

	ppd->cc_max_table_entries =
		ppd->cc_supported_table_entries/IB_CCT_ENTRIES;

	size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry)
		* IB_CCT_ENTRIES;
	ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
	if (!ppd->ccti_entries)
		goto bail;

	size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry);
	ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
	if (!ppd->congestion_entries)
		goto bail_1;

	size = sizeof(struct cc_table_shadow);
	ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
	if (!ppd->ccti_entries_shadow)
		goto bail_2;

	size = sizeof(struct ib_cc_congestion_setting_attr);
	ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
	if (!ppd->congestion_entries_shadow)
		goto bail_3;

	return 0;

bail_3:
	kfree(ppd->ccti_entries_shadow);
	ppd->ccti_entries_shadow = NULL;
bail_2:
	kfree(ppd->congestion_entries);
	ppd->congestion_entries = NULL;
bail_1:
	kfree(ppd->ccti_entries);
	ppd->ccti_entries = NULL;
bail:
	/* User is intentionally disabling the congestion control agent */
	if (!qib_cc_table_size)
		return 0;

	if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) {
		qib_dev_err(dd,
			"Congestion Control table size %d less than minimum %d for port %d\n",
			qib_cc_table_size, IB_CCT_MIN_ENTRIES, port);
		qib_cc_table_size = 0;
	}

	qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n",
		    port);
	return 0;
}

static int init_pioavailregs(struct qib_devdata *dd)
{
	int ret, pidx;
	u64 *status_page;

	dd->pioavailregs_dma = dma_alloc_coherent(
		&dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
		GFP_KERNEL);
	if (!dd->pioavailregs_dma) {
		qib_dev_err(dd,
			"failed to allocate PIOavail reg area in memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/*
	 * We really want L2 cache aligned, but for current CPUs of
	 * interest, they are the same.
	 */
	status_page = (u64 *)
		((char *) dd->pioavailregs_dma +
		 ((2 * L1_CACHE_BYTES +
		   dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
	/* device status comes first, for backwards compatibility */
	dd->devstatusp = status_page;
	*status_page++ = 0;
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		dd->pport[pidx].statusp = status_page;
		*status_page++ = 0;
	}

	/*
	 * Setup buffer to hold freeze and other messages, accessible to
	 * apps, following statusp.  This is per-unit, not per port.
	 */
	dd->freezemsg = (char *) status_page;
	/* length of msg buffer is "whatever is left" */
	ret = (char *) status_page - (char *) dd->pioavailregs_dma;
	dd->freezelen = PAGE_SIZE - ret;

	ret = 0;

done:
	return ret;
}

/**
 * init_shadow_tids - allocate the shadow TID array
 * @dd: the qlogic_ib device
 *
 * allocate the shadow TID array, so we can qib_munlock previous
 * entries.  It may make more sense to move the pageshadow to the
 * ctxt data structure, so we only allocate memory for ctxts actually
 * in use, since we are at 8k per ctxt now.
 *
 * We don't want failures here to prevent use of the driver/chip,
 * so no return value.
 */
static void init_shadow_tids(struct qib_devdata *dd)
{
	struct page **pages;
	dma_addr_t *addrs;

	pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
	if (!pages)
		goto bail;

	addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
	if (!addrs)
		goto bail_free;

	dd->pageshadow = pages;
	dd->physshadow = addrs;
	return;

bail_free:
	vfree(pages);
bail:
	dd->pageshadow = NULL;
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct qib_devdata *dd)
{
	int ret = 0;

	if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
	     QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
		qib_dev_err(dd,
			"Driver only handles version %d, chip swversion is %d (%llx), failing\n",
			QIB_CHIP_SWVERSION,
			(int)(dd->revision >>
				QLOGIC_IB_R_SOFTWARE_SHIFT) &
				QLOGIC_IB_R_SOFTWARE_MASK,
			(unsigned long long) dd->revision);
		ret = -ENOSYS;
		goto done;
	}

	if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
		qib_devinfo(dd->pcidev, "%s", dd->boardversion);

	spin_lock_init(&dd->pioavail_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->qib_diag_trans_lock);
	spin_lock_init(&dd->eep_st_lock);
	mutex_init(&dd->eep_lock);

	if (qib_mini_init)
		goto done;

	ret = init_pioavailregs(dd);
	init_shadow_tids(dd);

	qib_get_eeprom_info(dd);

	/* setup time (don't start yet) to verify we got interrupt */
	setup_timer(&dd->intrchk_timer, verify_interrupt,
		    (unsigned long)dd);
done:
	return ret;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the qlogic_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct qib_devdata *dd)
{
	int i;

	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_pports; ++i) {
		/*
		 * ctxt == -1 means "all contexts". Only really safe for
		 * _dis_abling things, as here.
		 */
		dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
				  QIB_RCVCTRL_INTRAVAIL_DIS |
				  QIB_RCVCTRL_TAILUPD_DIS, -1);
		/* Redundant across ports for some, but no big deal.  */
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
			QIB_SENDCTRL_AVAIL_DIS);
	}

	return 0;
}

static void enable_chip(struct qib_devdata *dd)
{
	u64 rcvmask;
	int i;

	/*
	 * Enable PIO send, and update of PIOavail regs to memory.
	 */
	for (i = 0; i < dd->num_pports; ++i)
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
			QIB_SENDCTRL_AVAIL_ENB);
	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and inits them.
	 */
	rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
	rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
		  QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		struct qib_ctxtdata *rcd = dd->rcd[i];

		if (rcd)
			dd->f_rcvctrl(rcd->ppd, rcvmask, i);
	}
}

static void verify_interrupt(unsigned long opaque)
{
	struct qib_devdata *dd = (struct qib_devdata *) opaque;
	u64 int_counter;

	if (!dd)
		return; /* being torn down */

	/*
	 * If we don't have a lid or any interrupts, let the user know and
	 * don't bother checking again.
	 */
	int_counter = qib_int_counter(dd) - dd->z_int_counter;
	if (int_counter == 0) {
		if (!dd->f_intr_fallback(dd))
			dev_err(&dd->pcidev->dev,
				"No interrupts detected, not usable.\n");
		else /* re-arm the timer to see if fallback works */
			mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
	}
}

static void init_piobuf_state(struct qib_devdata *dd)
{
	int i, pidx;
	unsigned uctxts;

	/*
	 * Ensure all buffers are free, and fifos empty.  Buffers
	 * are common, so only do once for port 0.
	 *
	 * After enable and qib_chg_pioavailkernel so we can safely
	 * enable pioavail updates and PIOENABLE.  After this, packets
	 * are ready and able to go out.
	 */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);

	/*
	 * If not all sendbufs are used, add the one to each of the lower
	 * numbered contexts.  pbufsctxt and lastctxt_piobuf are
	 * calculated in chip-specific code because it may cause some
	 * chip-specific adjustments to be made.
	 */
	uctxts = dd->cfgctxts - dd->first_user_ctxt;
	dd->ctxts_extrabuf = dd->pbufsctxt ?
		dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;

	/*
	 * Set up the shadow copies of the piobufavail registers,
	 * which we compare against the chip registers for now, and
	 * the in memory DMA'ed copies of the registers.
	 * By now pioavail updates to memory should have occurred, so
	 * copy them into our working/shadow registers; this is in
	 * case something went wrong with abort, but mostly to get the
	 * initial values of the generation bit correct.
	 */
	for (i = 0; i < dd->pioavregs; i++) {
		__le64 tmp;

		tmp = dd->pioavailregs_dma[i];
		/*
		 * Don't need to worry about pioavailkernel here
		 * because we will call qib_chg_pioavailkernel() later
		 * in initialization, to busy out buffers as needed.
		 */
		dd->pioavailshadow[i] = le64_to_cpu(tmp);
	}
	while (i < ARRAY_SIZE(dd->pioavailshadow))
		dd->pioavailshadow[i++] = 0; /* for debugging sanity */

	/* after pioavailshadow is setup */
	qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
			       TXCHK_CHG_TYPE_KERN, NULL);
	dd->f_initvl15_bufs(dd);
}

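/*
 * Extra-buffer distribution example (hypothetical counts): with
 * pbufsctxt == 16, eight user contexts and lastctxt_piobuf == 134,
 * ctxts_extrabuf = 134 - 16 * 8 = 6, so the six lowest-numbered user
 * contexts each receive one extra send buffer.
 */
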
/**
 * qib_create_workqueues - create per port workqueues
 * @dd: the qlogic_ib device
 */
static int qib_create_workqueues(struct qib_devdata *dd)
{
	int pidx;
	struct qib_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->qib_wq) {
			char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */

			snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
				dd->unit, pidx);
			ppd->qib_wq = alloc_ordered_workqueue(wq_name,
				WQ_MEM_RECLAIM);
			if (!ppd->qib_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("create_singlethread_workqueue failed for port %d\n",
		pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->qib_wq) {
			destroy_workqueue(ppd->qib_wq);
			ppd->qib_wq = NULL;
		}
	}
	return -ENOMEM;
}

static void qib_free_pportdata(struct qib_pportdata *ppd)
{
	free_percpu(ppd->ibport_data.pmastats);
	free_percpu(ppd->ibport_data.rvp.rc_acks);
	free_percpu(ppd->ibport_data.rvp.rc_qacks);
	free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
	ppd->ibport_data.pmastats = NULL;
}

/**
 * qib_init - do the actual initialization sequence on the chip
 * @dd: the qlogic_ib device
 * @reinit: reinitializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int qib_init(struct qib_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	u32 portok = 0;
	unsigned i;
	struct qib_ctxtdata *rcd;
	struct qib_pportdata *ppd;
	unsigned long flags;

	/* Set linkstate to unknown, so we can watch for a transition. */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
				 QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKV);
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* Bypass most chip-init, to get to device creation */
	if (qib_mini_init)
		return 0;

	ret = dd->f_late_initreg(dd);
	if (ret)
		goto done;

	/* dd->rcd can be NULL if early init failed */
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = dd->rcd[i];
		if (!rcd)
			continue;

		lastfail = qib_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = qib_setup_eagerbufs(rcd);
		if (lastfail) {
			qib_dev_err(dd,
				"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			continue;
		}
	}

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		int mtu;

		if (lastfail)
			ret = lastfail;
		ppd = dd->pport + pidx;
		mtu = ib_mtu_enum_to_int(qib_ibmtu);
		if (mtu == -1) {
			mtu = QIB_DEFAULT_MTU;
			qib_ibmtu = 0; /* don't leave invalid value */
		}
		/* set max we can ever have for this driver load */
		ppd->init_ibmaxlen = min(mtu > 2048 ?
					 dd->piosize4k : dd->piosize2k,
					 dd->rcvegrbufsize +
					 (dd->rcvhdrentsize << 2));
		/*
		 * Have to initialize ibmaxlen, but this will normally
		 * change immediately in qib_set_mtu().
		 */
		ppd->ibmaxlen = ppd->init_ibmaxlen;
		qib_set_mtu(ppd, mtu);

		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);

		lastfail = dd->f_bringup_serdes(ppd);
		if (lastfail) {
			qib_devinfo(dd->pcidev,
				 "Failed to bringup IB port %u\n", ppd->port);
			lastfail = -ENETDOWN;
			continue;
		}

		portok++;
	}

	if (!portok) {
		/* none of the ports initialized */
		if (!ret && lastfail)
			ret = lastfail;
		else if (!ret)
			ret = -ENETDOWN;
		/* but continue on, so we can debug cause */
	}

	enable_chip(dd);

	init_piobuf_state(dd);

done:
	if (!ret) {
		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			*ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
				QIB_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
			if (dd->flags & QIB_HAS_SEND_DMA)
				ret = qib_setup_sdma(ppd);
			setup_timer(&ppd->hol_timer, qib_hol_event,
				    (unsigned long)ppd);
			ppd->hol_state = QIB_HOL_UP;
		}

		/* now we can enable all interrupts from the chip */
		dd->f_set_intr_state(dd, 1);

		/*
		 * Setup to verify we get an interrupt, and fallback
		 * to an alternate if necessary and possible.
		 */
		mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
		/* start stats retrieval timer */
		mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

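/*
 * init_ibmaxlen sketch with illustrative sizes: for mtu == 4096,
 * piosize4k == 4136, rcvegrbufsize == 4096 and rcvhdrentsize == 16,
 * init_ibmaxlen = min(4136, 4096 + (16 << 2)) = 4136; qib_set_mtu()
 * then normally tightens ibmaxlen to match the active MTU.
 */
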
/*
 * These next two routines are placeholders in case we don't have per-arch
 * code for controlling write combining.  If explicit control of write
 * combining is not available, performance will probably be awful.
 */

int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
{
	return -EOPNOTSUPP;
}

void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
{
}

static inline struct qib_devdata *__qib_lookup(int unit)
{
	return idr_find(&qib_unit_table, unit);
}

struct qib_devdata *qib_lookup(int unit)
{
	struct qib_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&qib_devs_lock, flags);
	dd = __qib_lookup(unit);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void qib_stop_timers(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int pidx;

	if (dd->stats_timer.data) {
		del_timer_sync(&dd->stats_timer);
		dd->stats_timer.data = 0;
	}
	if (dd->intrchk_timer.data) {
		del_timer_sync(&dd->intrchk_timer);
		dd->intrchk_timer.data = 0;
	}
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hol_timer.data)
			del_timer_sync(&ppd->hol_timer);
		if (ppd->led_override_timer.data) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
		if (ppd->symerr_clear_timer.data)
			del_timer_sync(&ppd->symerr_clear_timer);
	}
}

/**
 * qib_shutdown_device - shut down a device
 * @dd: the qlogic_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.   It does not free any data structures.
 * Everything it does has to be setup again by qib_init(dd, 1)
 */
static void qib_shutdown_device(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		spin_lock_irq(&ppd->lflags_lock);
		ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKARMED | QIBL_LINKACTIVE |
				 QIBL_LINKV);
		spin_unlock_irq(&ppd->lflags_lock);
		*ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
	}
	dd->flags &= ~QIB_INITTED;

	/* mask interrupts, but not errors */
	dd->f_set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
				   QIB_RCVCTRL_CTXT_DIS |
				   QIB_RCVCTRL_INTRAVAIL_DIS |
				   QIB_RCVCTRL_PKEY_ENB, -1);
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_setextled(ppd, 0); /* make sure LEDs are off */

		if (dd->flags & QIB_HAS_SEND_DMA)
			qib_teardown_sdma(ppd);

		dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
				    QIB_SENDCTRL_SEND_DIS);
		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		dd->f_quiet_serdes(ppd);

		if (ppd->qib_wq) {
			destroy_workqueue(ppd->qib_wq);
			ppd->qib_wq = NULL;
		}
		qib_free_pportdata(ppd);
	}

}

/**
 * qib_free_ctxtdata - free a context's allocated data
 * @dd: the qlogic_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after qib_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */
void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_phys);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_phys);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}
	if (rcd->rcvegrbuf) {
		unsigned e;

		for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
			void *base = rcd->rcvegrbuf[e];
			size_t size = rcd->rcvegrbuf_size;

			dma_free_coherent(&dd->pcidev->dev, size,
					  base, rcd->rcvegrbuf_phys[e]);
		}
		kfree(rcd->rcvegrbuf);
		rcd->rcvegrbuf = NULL;
		kfree(rcd->rcvegrbuf_phys);
		rcd->rcvegrbuf_phys = NULL;
		rcd->rcvegrbuf_chunks = 0;
	}

	kfree(rcd->tid_pg_list);
	vfree(rcd->user_event_mask);
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
#ifdef CONFIG_DEBUG_FS
	kfree(rcd->opstats);
	rcd->opstats = NULL;
#endif
	kfree(rcd);
}

/*
 * Perform a PIO buffer bandwidth write test, to verify proper system
 * configuration.  Even when all the setup calls work, occasionally
 * BIOS or other issues can prevent write combining from working, or
 * can cause other bandwidth problems to the chip.
 *
 * This test simply writes the same buffer over and over again, and
 * measures close to the peak bandwidth to the chip (not testing
 * data bandwidth to the wire).   On chips that use an address-based
 * trigger to send packets to the wire, this is easy.  On chips that
 * use a count to trigger, we want to make sure that the packet doesn't
 * go out on the wire, or trigger flow control checks.
 */
static void qib_verify_pioperf(struct qib_devdata *dd)
{
	u32 pbnum, cnt, lcnt;
	u32 __iomem *piobuf;
	u64 msecs, emsecs;
	u32 *addr;

	piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
	if (!piobuf) {
		qib_devinfo(dd->pcidev,
			 "No PIObufs for checking perf, skipping\n");
		return;
	}

	/*
	 * Enough to give us a reasonable test, less than piobuf size, and
	 * likely multiple of store buffer length.
	 */
	cnt = 1024;

	addr = vmalloc(cnt);
	if (!addr)
		goto done;

	preempt_disable();  /* we want reasonably accurate elapsed time */
	msecs = 1 + jiffies_to_msecs(jiffies);
	for (lcnt = 0; lcnt < 10000U; lcnt++) {
		/* wait until we cross msec boundary */
		if (jiffies_to_msecs(jiffies) >= msecs)
			break;
		udelay(1);
	}

	dd->f_set_armlaunch(dd, 0);

	/*
	 * length 0, no dwords actually sent
	 */
	writeq(0, piobuf);
	qib_flush_wc();

	/*
	 * This is only roughly accurate, since even with preempt we
	 * still take interrupts that could take a while.   Running for
	 * >= 5 msec seems to get us "close enough" to accurate values.
	 */
	msecs = jiffies_to_msecs(jiffies);
	for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
		qib_pio_copy(piobuf + 64, addr, cnt >> 2);
		emsecs = jiffies_to_msecs(jiffies) - msecs;
	}

	/* 1 GiB/sec, slightly over IB SDR line rate */
	if (lcnt < (emsecs * 1024U))
		qib_dev_err(dd,
			"Performance problem: bandwidth to PIO buffers is only %u MiB/sec\n",
			lcnt / (u32) emsecs);

	preempt_enable();

	vfree(addr);

done:
	/* disarm piobuf, so it's available again */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
	qib_sendbuf_done(dd, pbnum);
	dd->f_set_armlaunch(dd, 1);
}

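/*
 * Threshold arithmetic for the check above: each loop iteration copies
 * cnt == 1024 bytes, so lcnt KiB moved in emsecs msec is roughly
 * lcnt / emsecs MiB/sec; requiring lcnt >= emsecs * 1024 therefore
 * demands about 1 GiB/sec to the PIO buffers, just over IB SDR rate.
 */
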
void qib_free_devdata(struct qib_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&qib_devs_lock, flags);
	idr_remove(&qib_unit_table, dd->unit);
	list_del(&dd->list);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

#ifdef CONFIG_DEBUG_FS
	qib_dbg_ibdev_exit(&dd->verbs_dev);
#endif
	free_percpu(dd->int_counter);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

u64 qib_int_counter(struct qib_devdata *dd)
{
	int cpu;
	u64 int_counter = 0;

	for_each_possible_cpu(cpu)
		int_counter += *per_cpu_ptr(dd->int_counter, cpu);
	return int_counter;
}

u64 qib_sps_ints(void)
{
	unsigned long flags;
	struct qib_devdata *dd;
	u64 sps_ints = 0;

	spin_lock_irqsave(&qib_devs_lock, flags);
	list_for_each_entry(dd, &qib_dev_list, list) {
		sps_ints += qib_int_counter(dd);
	}
	spin_unlock_irqrestore(&qib_devs_lock, flags);
	return sps_ints;
}

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct qib_devdata *dd;
	int ret, nports;

	/* extra is * number of ports */
	nports = extra / sizeof(struct qib_pportdata);
	dd = (struct qib_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						    nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dd->list);

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&qib_devs_lock, flags);

	ret = idr_alloc(&qib_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &qib_dev_list);
	}

	spin_unlock_irqrestore(&qib_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		qib_early_err(&pdev->dev,
			      "Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		qib_early_err(&pdev->dev,
			      "Could not allocate per-cpu int_counter\n");
		goto bail;
	}

	if (!qib_cpulist_count) {
		u32 count = num_online_cpus();

		qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
				      sizeof(long), GFP_KERNEL);
		if (qib_cpulist)
			qib_cpulist_count = count;
	}
#ifdef CONFIG_DEBUG_FS
	qib_dbg_ibdev_init(&dd->verbs_dev);
#endif
	return dd;
bail:
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
	return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void qib_disable_after_error(struct qib_devdata *dd)
{
	if (dd->flags & QIB_INITTED) {
		u32 pidx;

		dd->flags &= ~QIB_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct qib_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & QIB_PRESENT) {
					qib_set_linkstate(ppd,
						QIB_IB_LINKDOWN_DISABLE);
					dd->f_setextled(ppd, 0);
				}
				*ppd->statusp &= ~QIB_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->devstatusp)
		*dd->devstatusp |= QIB_STATUS_HWERROR;
}

static void qib_remove_one(struct pci_dev *);
static int qib_init_one(struct pci_dev *, const struct pci_device_id *);

#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "

static const struct pci_device_id qib_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, qib_pci_tbl);

static struct pci_driver qib_driver = {
	.name = QIB_DRV_NAME,
	.probe = qib_init_one,
	.remove = qib_remove_one,
	.id_table = qib_pci_tbl,
	.err_handler = &qib_pci_err_handler,
};

#ifdef CONFIG_INFINIBAND_QIB_DCA

static int qib_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call  = qib_notify_dca,
	.next           = NULL,
	.priority       = 0
};

static int qib_notify_dca_device(struct device *device, void *data)
{
	struct qib_devdata *dd = dev_get_drvdata(device);
	unsigned long event = *(unsigned long *)data;

	return dd->f_notify_dca(dd, event);
}

static int qib_notify_dca(struct notifier_block *nb, unsigned long event,
							  void *p)
{
	int rval;

	rval = driver_for_each_device(&qib_driver.driver, NULL,
				      &event, qib_notify_dca_device);
	return rval ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init qib_ib_init(void)
{
	int ret;

	ret = qib_dev_init();
	if (ret)
		goto bail;

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&qib_unit_table);

#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_register_notify(&dca_notifier);
#endif
#ifdef CONFIG_DEBUG_FS
	qib_dbg_init();
#endif
	ret = pci_register_driver(&qib_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}

	/* not fatal if it doesn't work */
	if (qib_init_qibfs())
		pr_err("Unable to register ipathfs\n");
	goto bail; /* all OK */

bail_dev:
#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
#ifdef CONFIG_DEBUG_FS
	qib_dbg_exit();
#endif
	idr_destroy(&qib_unit_table);
	qib_dev_cleanup();
bail:
	return ret;
}

module_init(qib_ib_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit qib_ib_cleanup(void)
{
	int ret;

	ret = qib_exit_qibfs();
	if (ret)
		pr_err(
			"Unable to cleanup counter filesystem: error %d\n",
			-ret);

#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&qib_driver);
#ifdef CONFIG_DEBUG_FS
	qib_dbg_exit();
#endif

	qib_cpulist_count = 0;
	kfree(qib_cpulist);

	idr_destroy(&qib_unit_table);
	qib_dev_cleanup();
}

module_exit(qib_ib_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct qib_devdata *dd)
{
	int ctxt;
	int pidx;
	struct qib_ctxtdata **tmp;
	unsigned long flags;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		if (dd->pport[pidx].statusp)
			*dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;

		spin_lock(&dd->pport[pidx].cc_shadow_lock);

		kfree(dd->pport[pidx].congestion_entries);
		dd->pport[pidx].congestion_entries = NULL;
		kfree(dd->pport[pidx].ccti_entries);
		dd->pport[pidx].ccti_entries = NULL;
		kfree(dd->pport[pidx].ccti_entries_shadow);
		dd->pport[pidx].ccti_entries_shadow = NULL;
		kfree(dd->pport[pidx].congestion_entries_shadow);
		dd->pport[pidx].congestion_entries_shadow = NULL;

		spin_unlock(&dd->pport[pidx].cc_shadow_lock);
	}

	qib_disable_wc(dd);

	if (dd->pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->pioavailregs_dma,
				  dd->pioavailregs_phys);
		dd->pioavailregs_dma = NULL;
	}

	if (dd->pageshadow) {
		struct page **tmpp = dd->pageshadow;
		dma_addr_t *tmpd = dd->physshadow;
		int i;

		for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
			int ctxt_tidbase = ctxt * dd->rcvtidcnt;
			int maxtid = ctxt_tidbase + dd->rcvtidcnt;

			for (i = ctxt_tidbase; i < maxtid; i++) {
				if (!tmpp[i])
					continue;
				pci_unmap_page(dd->pcidev, tmpd[i],
					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
				qib_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
			}
		}

		dd->pageshadow = NULL;
		vfree(tmpp);
		dd->physshadow = NULL;
		vfree(tmpd);
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we do for ctxtcnt, because that's what we allocate.
	 * We acquire lock to be really paranoid that rcd isn't being
	 * accessed from some interrupt-related code (that should not happen,
	 * but best to be sure).
	 */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	tmp = dd->rcd;
	dd->rcd = NULL;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
		struct qib_ctxtdata *rcd = tmp[ctxt];

		tmp[ctxt] = NULL; /* debugging paranoia */
		qib_free_ctxtdata(dd, rcd);
	}
	kfree(tmp);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void qib_postinit_cleanup(struct qib_devdata *dd)
{
	/*
	 * Clean up chip-specific stuff.
	 * We check for NULL here, because it's outside
	 * the kregbase check, and we need to call it
	 * after the free_irq.  Thus it's possible that
	 * the function pointers were never initialized.
	 */
	if (dd->f_cleanup)
		dd->f_cleanup(dd);

	qib_pcie_ddcleanup(dd);

	cleanup_device_data(dd);

	qib_free_devdata(dd);
}

static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret, j, pidx, initfail;
	struct qib_devdata *dd = NULL;

	ret = qib_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	switch (ent->device) {
	case PCI_DEVICE_ID_QLOGIC_IB_6120:
#ifdef CONFIG_PCI_MSI
		dd = qib_init_iba6120_funcs(pdev, ent);
#else
		qib_early_err(&pdev->dev,
			"Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
			ent->device);
		dd = ERR_PTR(-ENODEV);
#endif
		break;

	case PCI_DEVICE_ID_QLOGIC_IB_7220:
		dd = qib_init_iba7220_funcs(pdev, ent);
		break;

	case PCI_DEVICE_ID_QLOGIC_IB_7322:
		dd = qib_init_iba7322_funcs(pdev, ent);
		break;

	default:
		qib_early_err(&pdev->dev,
			"Failing on unknown Intel deviceid 0x%x\n",
			ent->device);
		ret = -ENODEV;
	}

	if (IS_ERR(dd))
		ret = PTR_ERR(dd);
	if (ret)
		goto bail; /* error already printed */

	ret = qib_create_workqueues(dd);
	if (ret)
		goto bail;

	/* do the generic initialization */
	initfail = qib_init(dd, 0);

	ret = qib_register_ib_device(dd);

	/*
	 * Now ready for use.  this should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!qib_mini_init && !initfail && !ret)
		dd->flags |= QIB_INITTED;

	j = qib_device_create(dd);
	if (j)
		qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
	j = qibfs_add(dd);
	if (j)
		qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
			    -j);

	if (qib_mini_init || initfail || ret) {
		qib_stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			dd->f_quiet_serdes(dd->pport + pidx);
		if (qib_mini_init)
			goto bail;
		if (!j) {
			(void) qibfs_remove(dd);
			qib_device_remove(dd);
		}
		if (!ret)
			qib_unregister_ib_device(dd);
		qib_postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail;
	}

	ret = qib_enable_wc(dd);
	if (ret) {
		qib_dev_err(dd,
			"Write combining not enabled (err %d): performance may be poor\n",
			-ret);
		ret = 0;
	}

	qib_verify_pioperf(dd);
bail:
	return ret;
}

static void qib_remove_one(struct pci_dev *pdev)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);
	int ret;

	/* unregister from IB core */
	qib_unregister_ib_device(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	if (!qib_mini_init)
		qib_shutdown_device(dd);

	qib_stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	ret = qibfs_remove(dd);
	if (ret)
		qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
			    -ret);

	qib_device_remove(dd);

	qib_postinit_cleanup(dd);
}

/**
 * qib_create_rcvhdrq - create a receive header queue
 * @dd: the qlogic_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	unsigned amt;
	int old_node_id;

	if (!rcd->rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags;

		amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
			    sizeof(u32), PAGE_SIZE);
		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
			GFP_USER : GFP_KERNEL;

		old_node_id = dev_to_node(&dd->pcidev->dev);
		set_dev_node(&dd->pcidev->dev, rcd->node_id);
		rcd->rcvhdrq = dma_alloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
			gfp_flags | __GFP_COMP);
		set_dev_node(&dd->pcidev->dev, old_node_id);

		if (!rcd->rcvhdrq) {
			qib_dev_err(dd,
				"attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				amt, rcd->ctxt);
			goto bail;
		}

		if (rcd->ctxt >= dd->first_user_ctxt) {
			rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
			if (!rcd->user_event_mask)
				goto bail_free_hdrq;
		}

		if (!(dd->flags & QIB_NODMA_RTAIL)) {
			set_dev_node(&dd->pcidev->dev, rcd->node_id);
			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				gfp_flags);
			set_dev_node(&dd->pcidev->dev, old_node_id);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
		}

		rcd->rcvhdrq_size = amt;
	}

	/* clear for security and sanity on each use */
	memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
	if (rcd->rcvhdrtail_kvaddr)
		memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
	return 0;

bail_free:
	qib_dev_err(dd,
		"attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		rcd->ctxt);
	vfree(rcd->user_event_mask);
	rcd->user_event_mask = NULL;
bail_free_hdrq:
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_phys);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}

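/*
 * Size example (illustrative values): rcvhdrcnt == 1024 entries of
 * rcvhdrentsize == 16 dwords gives 1024 * 16 * sizeof(u32) = 64KB,
 * already page-aligned, so amt == 65536 (sixteen 4KB pages) and one
 * coherent allocation backs the whole queue.
 */
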
/**
 * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into chip.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	gfp_t gfp_flags;
	int old_node_id;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	egrcnt = rcd->rcvegrcnt;
	egroff = rcd->rcvegr_tid_base;
	egrsize = dd->rcvegrbufsize;

	chunk = rcd->rcvegrbuf_chunks;
	egrperchunk = rcd->rcvegrbufs_perchunk;
	size = rcd->rcvegrbuf_size;
	if (!rcd->rcvegrbuf) {
		rcd->rcvegrbuf =
			kzalloc_node(chunk * sizeof(rcd->rcvegrbuf[0]),
				GFP_KERNEL, rcd->node_id);
		if (!rcd->rcvegrbuf)
			goto bail;
	}
	if (!rcd->rcvegrbuf_phys) {
		rcd->rcvegrbuf_phys =
			kmalloc_node(chunk * sizeof(rcd->rcvegrbuf_phys[0]),
				GFP_KERNEL, rcd->node_id);
		if (!rcd->rcvegrbuf_phys)
			goto bail_rcvegrbuf;
	}
	for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
		if (rcd->rcvegrbuf[e])
			continue;

		old_node_id = dev_to_node(&dd->pcidev->dev);
		set_dev_node(&dd->pcidev->dev, rcd->node_id);
		rcd->rcvegrbuf[e] =
			dma_alloc_coherent(&dd->pcidev->dev, size,
					   &rcd->rcvegrbuf_phys[e],
					   gfp_flags);
		set_dev_node(&dd->pcidev->dev, old_node_id);
		if (!rcd->rcvegrbuf[e])
			goto bail_rcvegrbuf_phys;
	}

	rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];

	for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
		dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
		unsigned i;

		/* clear for security and sanity on each use */
		memset(rcd->rcvegrbuf[chunk], 0, size);

		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
			dd->f_put_tid(dd, e + egroff +
					  (u64 __iomem *)
					  ((char __iomem *)
					   dd->kregbase +
					   dd->rcvegrbase),
					  RCVHQ_RCV_TYPE_EAGER, pa);
			pa += egrsize;
		}
		cond_resched(); /* don't hog the cpu */
	}

	return 0;

bail_rcvegrbuf_phys:
	for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
		dma_free_coherent(&dd->pcidev->dev, size,
				  rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
	kfree(rcd->rcvegrbuf_phys);
	rcd->rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
	kfree(rcd->rcvegrbuf);
	rcd->rcvegrbuf = NULL;
bail:
	return -ENOMEM;
}

/*
 * Note: Changes to this routine should be mirrored
 * for the diagnostics routine qib_remap_ioaddr32().
 * There is also related code for VL15 buffers in qib_init_7322_variables().
 * The teardown code that unmaps is in qib_pcie_ddcleanup()
 */
int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
{
	u64 __iomem *qib_kregbase = NULL;
	void __iomem *qib_piobase = NULL;
	u64 __iomem *qib_userbase = NULL;
	u64 qib_kreglen;
	u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
	u64 qib_pio4koffset = dd->piobufbase >> 32;
	u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
	u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
	u64 qib_physaddr = dd->physaddr;
	u64 qib_piolen;
	u64 qib_userlen = 0;

	/*
	 * Free the old mapping because the kernel will try to reuse the
	 * old mapping and not create a new mapping with the
	 * write combining attribute.
	 */
	iounmap(dd->kregbase);
	dd->kregbase = NULL;

	/*
	 * Assumes chip address space looks like:
	 *		- kregs + sregs + cregs + uregs (in any order)
	 *		- piobufs (2K and 4K bufs in either order)
	 *	or:
	 *		- kregs + sregs + cregs (in any order)
	 *		- piobufs (2K and 4K bufs in either order)
	 *		- uregs
	 */
	if (dd->piobcnt4k == 0) {
		qib_kreglen = qib_pio2koffset;
		qib_piolen = qib_pio2klen;
	} else if (qib_pio2koffset < qib_pio4koffset) {
		qib_kreglen = qib_pio2koffset;
		qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
	} else {
		qib_kreglen = qib_pio4koffset;
		qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
	}
	qib_piolen += vl15buflen;
	/* Map just the configured ports (not all hw ports) */
	if (dd->uregbase > qib_kreglen)
		qib_userlen = dd->ureg_align * dd->cfgctxts;

	/* Sanity checks passed, now create the new mappings */
	qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
	if (!qib_kregbase)
		goto bail;

	qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
	if (!qib_piobase)
		goto bail_kregbase;

	if (qib_userlen) {
		qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
					       qib_userlen);
		if (!qib_userbase)
			goto bail_piobase;
	}

	dd->kregbase = qib_kregbase;
	dd->kregend = (u64 __iomem *)
		((char __iomem *) qib_kregbase + qib_kreglen);
	dd->piobase = qib_piobase;
	dd->pio2kbase = (void __iomem *)
		(((char __iomem *) dd->piobase) +
		 qib_pio2koffset - qib_kreglen);
	if (dd->piobcnt4k)
		dd->pio4kbase = (void __iomem *)
			(((char __iomem *) dd->piobase) +
			 qib_pio4koffset - qib_kreglen);
	if (qib_userlen)
		/* ureg will now be accessed relative to dd->userbase */
		dd->userbase = qib_userbase;
	return 0;

bail_piobase:
	iounmap(qib_piobase);
bail_kregbase:
	iounmap(qib_kregbase);
bail:
	return -ENOMEM;
}
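
/*
 * Layout example (hypothetical offsets): with the 2K buffers at
 * 0x100000 (low half of piobufbase), the 4K buffers at 0x200000
 * (high half) and qib_pio4klen == 0x80000, the register block is
 * [0, 0x100000), so qib_kreglen = 0x100000 and qib_piolen =
 * 0x200000 + 0x80000 - 0x100000 = 0x180000, plus vl15buflen.
 */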