/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "sdma.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the initialization code.
 */
const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";

DEFINE_SPINLOCK(hfi1_devs_lock);
LIST_HEAD(hfi1_dev_list);
DEFINE_MUTEX(hfi1_mutex);       /* general driver use */

unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is " __stringify(
                 HFI1_DEFAULT_MAX_MTU));

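/*
 * Usage sketch (illustrative value): loading with "modprobe hfi1
 * max_mtu=8192" lowers the ceiling below HFI1_DEFAULT_MAX_MTU.  The
 * parameter is S_IRUGO only, so it can be read back through sysfs but
 * not changed after load.
 */
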
unsigned int hfi1_cu = 1;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");

unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
static int hfi1_caps_set(const char *, const struct kernel_param *);
static int hfi1_caps_get(char *, const struct kernel_param *);
static const struct kernel_param_ops cap_ops = {
        .set = hfi1_caps_set,
        .get = hfi1_caps_get
};
module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
MODULE_VERSION(HFI1_DRIVER_VERSION);

/*
 * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define MAX_PKT_RECV 64
#define EGR_HEAD_UPDATE_THRESHOLD 16

struct hfi1_ib_stats hfi1_stats;

static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
{
        int ret = 0;
        unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
                cap_mask = *cap_mask_ptr, value, diff,
                write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
                              HFI1_CAP_WRITABLE_MASK);

        ret = kstrtoul(val, 0, &value);
        if (ret) {
                pr_warn("Invalid module parameter value for 'cap_mask'\n");
                goto done;
        }
        /* Get the changed bits (except the locked bit) */
        diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);

        /* Remove any bits that are not allowed to change after driver load */
        if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
                pr_warn("Ignoring non-writable capability bits %#lx\n",
                        diff & ~write_mask);
                diff &= write_mask;
        }

        /* Mask off any reserved bits */
        diff &= ~HFI1_CAP_RESERVED_MASK;
        /* Clear any previously set and changing bits */
        cap_mask &= ~diff;
        /* Update the bits with the new capability */
        cap_mask |= (value & diff);
        /* Check for any kernel/user restrictions */
        diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
                ((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
        cap_mask &= ~diff;
        /* Set the bitmask to the final set */
        *cap_mask_ptr = cap_mask;
done:
        return ret;
}

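/*
 * Worked example of the filtering above (hypothetical 4-bit masks for
 * brevity, with no reserved or kernel-restricted bits in play): with
 * cap_mask = 0b0011, write_mask = 0b0110 and the mask locked, a request
 * of value = 0b1101 gives diff = 0b1110, trimmed to
 * diff & write_mask = 0b0110; the stored mask becomes
 * (0b0011 & ~0b0110) | (0b1101 & 0b0110) = 0b0101.
 */
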
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
{
        unsigned long cap_mask = *(unsigned long *)kp->arg;

        cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
        cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);

        return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}

const char *get_unit_name(int unit)
{
        static char iname[16];

        snprintf(iname, sizeof(iname), DRIVER_NAME "_%u", unit);
        return iname;
}

const char *get_card_name(struct rvt_dev_info *rdi)
{
        struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
        struct hfi1_devdata *dd = container_of(ibdev,
                                               struct hfi1_devdata, verbs_dev);
        return get_unit_name(dd->unit);
}

struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
{
        struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
        struct hfi1_devdata *dd = container_of(ibdev,
                                               struct hfi1_devdata, verbs_dev);
        return dd->pcidev;
}

/*
 * Return count of units with at least one port ACTIVE.
 */
int hfi1_count_active_units(void)
{
        struct hfi1_devdata *dd;
        struct hfi1_pportdata *ppd;
        unsigned long flags;
        int pidx, nunits_active = 0;

        spin_lock_irqsave(&hfi1_devs_lock, flags);
        list_for_each_entry(dd, &hfi1_dev_list, list) {
                if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase)
                        continue;
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        ppd = dd->pport + pidx;
                        if (ppd->lid && ppd->linkup) {
                                nunits_active++;
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
        return nunits_active;
}

/*
 * Return count of all units, optionally return in arguments
 * the number of usable (present) units, and the number of
 * ports that are up.
 */
int hfi1_count_units(int *npresentp, int *nupp)
{
        int nunits = 0, npresent = 0, nup = 0;
        struct hfi1_devdata *dd;
        unsigned long flags;
        int pidx;
        struct hfi1_pportdata *ppd;

        spin_lock_irqsave(&hfi1_devs_lock, flags);

        list_for_each_entry(dd, &hfi1_dev_list, list) {
                nunits++;
                if ((dd->flags & HFI1_PRESENT) && dd->kregbase)
                        npresent++;
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        ppd = dd->pport + pidx;
                        if (ppd->lid && ppd->linkup)
                                nup++;
                }
        }

        spin_unlock_irqrestore(&hfi1_devs_lock, flags);

        if (npresentp)
                *npresentp = npresent;
        if (nupp)
                *nupp = nup;

        return nunits;
}

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
                               u8 *update)
{
        u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);

        *update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
        return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
                        (offset * RCV_BUF_BLOCK_SIZE));
}

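/*
 * Address math sketch (illustrative numbers): with rhf_egr_index() == 3
 * and rhf_egr_buf_offset() == 2, the payload starts at
 * rcvtids[3].addr + 2 * RCV_BUF_BLOCK_SIZE within that chunk.
 */
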
/*
 * Validate and encode a given RcvArray buffer size.
 * The function checks whether the given size falls within the
 * allowed size range for the respective type and, optionally,
 * returns the proper encoding.
 */
inline int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
{
        if (unlikely(!PAGE_ALIGNED(size)))
                return 0;
        if (unlikely(size < MIN_EAGER_BUFFER))
                return 0;
        if (size >
            (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
                return 0;
        if (encoded)
                *encoded = ilog2(size / PAGE_SIZE) + 1;
        return 1;
}

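/*
 * Worked example of the encoding above, assuming 4 KiB pages: an 8 KiB
 * buffer encodes as ilog2(8192 / 4096) + 1 = 2, and a single-page buffer
 * encodes as ilog2(1) + 1 = 1.
 */
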
static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
                       struct hfi1_packet *packet)
{
        struct ib_header *rhdr = packet->hdr;
        u32 rte = rhf_rcv_type_err(packet->rhf);
        int lnh = be16_to_cpu(rhdr->lrh[0]) & 3;
        struct hfi1_ibport *ibp = &ppd->ibport_data;
        struct hfi1_devdata *dd = ppd->dd;
        struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;

        if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR))
                return;

        if (packet->rhf & RHF_TID_ERR) {
                /* For TIDERR and RC QPs preemptively schedule a NAK */
                struct ib_other_headers *ohdr = NULL;
                u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
                u16 lid  = be16_to_cpu(rhdr->lrh[1]);
                u32 qp_num;
                u32 rcv_flags = 0;

                /* Sanity check packet */
                if (tlen < 24)
                        goto drop;

                /* Check for GRH */
                if (lnh == HFI1_LRH_BTH) {
                        ohdr = &rhdr->u.oth;
                } else if (lnh == HFI1_LRH_GRH) {
                        u32 vtf;

                        ohdr = &rhdr->u.l.oth;
                        if (rhdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
                                goto drop;
                        vtf = be32_to_cpu(rhdr->u.l.grh.version_tclass_flow);
                        if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
                                goto drop;
                        rcv_flags |= HFI1_HAS_GRH;
                } else {
                        goto drop;
                }
                /* Get the destination QP number. */
                qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
                if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
                        struct rvt_qp *qp;
                        unsigned long flags;

                        rcu_read_lock();
                        qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
                        if (!qp) {
                                rcu_read_unlock();
                                goto drop;
                        }

                        /*
                         * Handle only RC QPs - for other QP types drop error
                         * packet.
                         */
                        spin_lock_irqsave(&qp->r_lock, flags);

                        /* Check for valid receive state. */
                        if (!(ib_rvt_state_ops[qp->state] &
                              RVT_PROCESS_RECV_OK)) {
                                ibp->rvp.n_pkt_drops++;
                        }

                        switch (qp->ibqp.qp_type) {
                        case IB_QPT_RC:
                                hfi1_rc_hdrerr(rcd, rhdr, rcv_flags, qp);
                                break;
                        default:
                                /* For now don't handle any other QP types */
                                break;
                        }

                        spin_unlock_irqrestore(&qp->r_lock, flags);
                        rcu_read_unlock();
                } /* Unicast QP */
        } /* Valid packet with TIDErr */

        /* handle "RcvTypeErr" flags */
        switch (rte) {
        case RHF_RTE_ERROR_OP_CODE_ERR:
        {
                u32 opcode;
                void *ebuf = NULL;
                __be32 *bth = NULL;

                if (rhf_use_egr_bfr(packet->rhf))
                        ebuf = packet->ebuf;

                if (!ebuf)
                        goto drop; /* this should never happen */

                if (lnh == HFI1_LRH_BTH)
                        bth = (__be32 *)ebuf;
                else if (lnh == HFI1_LRH_GRH)
                        bth = (__be32 *)((char *)ebuf + sizeof(struct ib_grh));
                else
                        goto drop;

                opcode = be32_to_cpu(bth[0]) >> 24;
                opcode &= 0xff;

                if (opcode == IB_OPCODE_CNP) {
                        /*
                         * Only in pre-B0 h/w is the CNP_OPCODE handled
                         * via this code path.
                         */
                        struct rvt_qp *qp = NULL;
                        u32 lqpn, rqpn;
                        u16 rlid;
                        u8 svc_type, sl, sc5;

                        sc5 = hdr2sc(rhdr, packet->rhf);
                        sl = ibp->sc_to_sl[sc5];

                        lqpn = be32_to_cpu(bth[1]) & RVT_QPN_MASK;
                        rcu_read_lock();
                        qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
                        if (!qp) {
                                rcu_read_unlock();
                                goto drop;
                        }

                        switch (qp->ibqp.qp_type) {
                        case IB_QPT_UD:
                                rlid = 0;
                                rqpn = 0;
                                svc_type = IB_CC_SVCTYPE_UD;
                                break;
                        case IB_QPT_UC:
                                rlid = be16_to_cpu(rhdr->lrh[3]);
                                rqpn = qp->remote_qpn;
                                svc_type = IB_CC_SVCTYPE_UC;
                                break;
                        default:
                                goto drop;
                        }

                        process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
                        rcu_read_unlock();
                }

                packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
                break;
        }
        default:
                break;
        }

drop:
        return;
}

static inline void init_packet(struct hfi1_ctxtdata *rcd,
                               struct hfi1_packet *packet)
{
        packet->rsize = rcd->rcvhdrqentsize; /* words */
        packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
        packet->rcd = rcd;
        packet->updegr = 0;
        packet->etail = -1;
        packet->rhf_addr = get_rhf_addr(rcd);
        packet->rhf = rhf_to_cpu(packet->rhf_addr);
        packet->rhqoff = rcd->head;
        packet->numpkt = 0;
        packet->rcv_flags = 0;
}

void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
                               bool do_cnp)
{
        struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        struct ib_header *hdr = pkt->hdr;
        struct ib_other_headers *ohdr = pkt->ohdr;
        struct ib_grh *grh = NULL;
        u32 rqpn = 0, bth1;
        u16 rlid, dlid = be16_to_cpu(hdr->lrh[1]);
        u8 sc, svc_type;
        bool is_mcast = false;

        if (pkt->rcv_flags & HFI1_HAS_GRH)
                grh = &hdr->u.l.grh;

        switch (qp->ibqp.qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_UD:
                rlid = be16_to_cpu(hdr->lrh[3]);
                rqpn = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
                svc_type = IB_CC_SVCTYPE_UD;
                is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
                        (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
                break;
        case IB_QPT_UC:
                rlid = qp->remote_ah_attr.dlid;
                rqpn = qp->remote_qpn;
                svc_type = IB_CC_SVCTYPE_UC;
                break;
        case IB_QPT_RC:
                rlid = qp->remote_ah_attr.dlid;
                rqpn = qp->remote_qpn;
                svc_type = IB_CC_SVCTYPE_RC;
                break;
        default:
                return;
        }

        sc = hdr2sc(hdr, pkt->rhf);

        bth1 = be32_to_cpu(ohdr->bth[1]);
        if (do_cnp && (bth1 & HFI1_FECN_SMASK)) {
                u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);

                return_cnp(ibp, qp, rqpn, pkey, dlid, rlid, sc, grh);
        }

        if (!is_mcast && (bth1 & HFI1_BECN_SMASK)) {
                struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
                u32 lqpn = bth1 & RVT_QPN_MASK;
                u8 sl = ibp->sc_to_sl[sc];

                process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
        }
}

struct ps_mdata {
        struct hfi1_ctxtdata *rcd;
        u32 rsize;
        u32 maxcnt;
        u32 ps_head;
        u32 ps_tail;
        u32 ps_seq;
};

static inline void init_ps_mdata(struct ps_mdata *mdata,
                                 struct hfi1_packet *packet)
{
        struct hfi1_ctxtdata *rcd = packet->rcd;

        mdata->rcd = rcd;
        mdata->rsize = packet->rsize;
        mdata->maxcnt = packet->maxcnt;
        mdata->ps_head = packet->rhqoff;

        if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
                mdata->ps_tail = get_rcvhdrtail(rcd);
                if (rcd->ctxt == HFI1_CTRL_CTXT)
                        mdata->ps_seq = rcd->seq_cnt;
                else
                        mdata->ps_seq = 0; /* not used with DMA_RTAIL */
        } else {
                mdata->ps_tail = 0; /* used only with DMA_RTAIL */
                mdata->ps_seq = rcd->seq_cnt;
        }
}

static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
                          struct hfi1_ctxtdata *rcd)
{
        if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
                return mdata->ps_head == mdata->ps_tail;
        return mdata->ps_seq != rhf_rcv_seq(rhf);
}

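/*
 * Completion-detection sketch: with DMA_RTAIL the prescan is done when
 * the software head catches the DMA'ed tail; without it, it is done when
 * the RHF sequence number stops matching the expected ps_seq.
 */
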
static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
                          struct hfi1_ctxtdata *rcd)
{
        /*
         * Control context can potentially receive an invalid rhf.
         * Drop such packets.
         */
        if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
                return mdata->ps_seq != rhf_rcv_seq(rhf);

        return 0;
}

static inline void update_ps_mdata(struct ps_mdata *mdata,
                                   struct hfi1_ctxtdata *rcd)
{
        mdata->ps_head += mdata->rsize;
        if (mdata->ps_head >= mdata->maxcnt)
                mdata->ps_head = 0;

        /* Control context must do seq counting */
        if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
            (rcd->ctxt == HFI1_CTRL_CTXT)) {
                if (++mdata->ps_seq > 13)
                        mdata->ps_seq = 1;
        }
}

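/*
 * Wrap example: the expected sequence number cycles through 1..13, so a
 * ps_seq of 13 advances to 1, never 14; handle_receive_interrupt() below
 * applies the same wrap to rcd->seq_cnt.
 */
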
/*
 * prescan_rxq - search through the receive queue looking for packets
 * containing Explicit Congestion Notifications (FECNs or BECNs).
 * When an ECN is found, process the Congestion Notification, and toggle
 * it off.
 * This is declared as a macro to allow quick checking of the port to avoid
 * the overhead of a function call if not enabled.
 */
#define prescan_rxq(rcd, packet) \
        do { \
                if (rcd->ppd->cc_prescan) \
                        __prescan_rxq(packet); \
        } while (0)
static void __prescan_rxq(struct hfi1_packet *packet)
{
        struct hfi1_ctxtdata *rcd = packet->rcd;
        struct ps_mdata mdata;

        init_ps_mdata(&mdata, packet);

        while (1) {
                struct hfi1_devdata *dd = rcd->dd;
                struct hfi1_ibport *ibp = &rcd->ppd->ibport_data;
                __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
                                   dd->rhf_offset;
                struct rvt_qp *qp;
                struct ib_header *hdr;
                struct ib_other_headers *ohdr;
                struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
                u64 rhf = rhf_to_cpu(rhf_addr);
                u32 etype = rhf_rcv_type(rhf), qpn, bth1;
                int is_ecn = 0;
                u8 lnh;

                if (ps_done(&mdata, rhf, rcd))
                        break;

                if (ps_skip(&mdata, rhf, rcd))
                        goto next;

                if (etype != RHF_RCV_TYPE_IB)
                        goto next;

                hdr = hfi1_get_msgheader(dd, rhf_addr);

                lnh = be16_to_cpu(hdr->lrh[0]) & 3;

                if (lnh == HFI1_LRH_BTH)
                        ohdr = &hdr->u.oth;
                else if (lnh == HFI1_LRH_GRH)
                        ohdr = &hdr->u.l.oth;
                else
                        goto next; /* just in case */

                bth1 = be32_to_cpu(ohdr->bth[1]);
                is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK));

                if (!is_ecn)
                        goto next;

                qpn = bth1 & RVT_QPN_MASK;
                rcu_read_lock();
                qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);

                if (!qp) {
                        rcu_read_unlock();
                        goto next;
                }

                process_ecn(qp, packet, true);
                rcu_read_unlock();

                /* turn off BECN, FECN */
                bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK);
                ohdr->bth[1] = cpu_to_be32(bth1);
next:
                update_ps_mdata(&mdata, rcd);
        }
}

static inline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
{
        int ret = RCV_PKT_OK;

        /* Set up for the next packet */
        packet->rhqoff += packet->rsize;
        if (packet->rhqoff >= packet->maxcnt)
                packet->rhqoff = 0;

        packet->numpkt++;
        if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)) {
                if (thread) {
                        cond_resched();
                } else {
                        ret = RCV_PKT_LIMIT;
                        this_cpu_inc(*packet->rcd->dd->rcv_limit);
                }
        }

        packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
                           packet->rcd->dd->rhf_offset;
        packet->rhf = rhf_to_cpu(packet->rhf_addr);

        return ret;
}

static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
        int ret = RCV_PKT_OK;

        packet->hdr = hfi1_get_msgheader(packet->rcd->dd,
                                         packet->rhf_addr);
        packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
        packet->etype = rhf_rcv_type(packet->rhf);
        /* total length */
        packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
        /* retrieve eager buffer details */
        packet->ebuf = NULL;
        if (rhf_use_egr_bfr(packet->rhf)) {
                packet->etail = rhf_egr_index(packet->rhf);
                packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
                                          &packet->updegr);
                /*
                 * Prefetch the contents of the eager buffer.  It is
                 * OK to send a negative length to prefetch_range().
                 * The +2 is the size of the RHF.
                 */
                prefetch_range(packet->ebuf,
                               packet->tlen - ((packet->rcd->rcvhdrqentsize -
                                               (rhf_hdrq_offset(packet->rhf)
                                                + 2)) * 4));
        }

        /*
         * Call a type specific handler for the packet. We
         * should be able to trust that etype won't be beyond
         * the range of valid indexes. If so something is really
         * wrong and we can probably just let things come
         * crashing down. There is no need to eat another
         * comparison in this performance critical code.
         */
        packet->rcd->dd->rhf_rcv_function_map[packet->etype](packet);
        packet->numpkt++;

        /* Set up for the next packet */
        packet->rhqoff += packet->rsize;
        if (packet->rhqoff >= packet->maxcnt)
                packet->rhqoff = 0;

        if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)) {
                if (thread) {
                        cond_resched();
                } else {
                        ret = RCV_PKT_LIMIT;
                        this_cpu_inc(*packet->rcd->dd->rcv_limit);
                }
        }

        packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
                           packet->rcd->dd->rhf_offset;
        packet->rhf = rhf_to_cpu(packet->rhf_addr);

        return ret;
}

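/*
 * Throttling sketch: every MAX_PKT_RECV (64) packets the helpers above
 * either yield via cond_resched() when running on a receive thread, or
 * return RCV_PKT_LIMIT (and bump the per-CPU rcv_limit counter) so an
 * interrupt-context caller can stop early.
 */
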
static inline void process_rcv_update(int last, struct hfi1_packet *packet)
{
        /*
         * Update head regs etc., every 16 packets, if not last pkt,
         * to help prevent rcvhdrq overflows, when many packets
         * are processed and queue is nearly full.
         * Don't request an interrupt for intermediate updates.
         */
        if (!last && !(packet->numpkt & 0xf)) {
                update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
                               packet->etail, 0, 0);
                packet->updegr = 0;
        }
        packet->rcv_flags = 0;
}

static inline void finish_packet(struct hfi1_packet *packet)
{
        /*
         * Nothing we need to free for the packet.
         *
         * The only thing we need to do is a final update and call for an
         * interrupt
         */
        update_usrhead(packet->rcd, packet->rcd->head, packet->updegr,
                       packet->etail, rcv_intr_dynamic, packet->numpkt);
}

static inline void process_rcv_qp_work(struct hfi1_packet *packet)
{
        struct hfi1_ctxtdata *rcd;
        struct rvt_qp *qp, *nqp;

        rcd = packet->rcd;
        rcd->head = packet->rhqoff;

        /*
         * Iterate over all QPs waiting to respond.
         * The list won't change since the IRQ is only run on one CPU.
         */
        list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
                list_del_init(&qp->rspwait);
                if (qp->r_flags & RVT_R_RSP_NAK) {
                        qp->r_flags &= ~RVT_R_RSP_NAK;
                        hfi1_send_rc_ack(rcd, qp, 0);
                }
                if (qp->r_flags & RVT_R_RSP_SEND) {
                        unsigned long flags;

                        qp->r_flags &= ~RVT_R_RSP_SEND;
                        spin_lock_irqsave(&qp->s_lock, flags);
                        if (ib_rvt_state_ops[qp->state] &
                                        RVT_PROCESS_OR_FLUSH_SEND)
                                hfi1_schedule_send(qp);
                        spin_unlock_irqrestore(&qp->s_lock, flags);
                }
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
}

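/*
 * Deferred-work sketch: a QP queued with RVT_R_RSP_NAK has its ACK/NAK
 * sent here, off the per-packet fast path, while one queued with
 * RVT_R_RSP_SEND is rescheduled via hfi1_schedule_send() under s_lock.
 */
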
/*
 * Handle receive interrupts when using the no dma rtail option.
 */
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
        u32 seq;
        int last = RCV_PKT_OK;
        struct hfi1_packet packet;

        init_packet(rcd, &packet);
        seq = rhf_rcv_seq(packet.rhf);
        if (seq != rcd->seq_cnt) {
                last = RCV_PKT_DONE;
                goto bail;
        }

        prescan_rxq(rcd, &packet);

        while (last == RCV_PKT_OK) {
                last = process_rcv_packet(&packet, thread);
                seq = rhf_rcv_seq(packet.rhf);
                if (++rcd->seq_cnt > 13)
                        rcd->seq_cnt = 1;
                if (seq != rcd->seq_cnt)
                        last = RCV_PKT_DONE;
                process_rcv_update(last, &packet);
        }
        process_rcv_qp_work(&packet);
bail:
        finish_packet(&packet);
        return last;
}

int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
        u32 hdrqtail;
        int last = RCV_PKT_OK;
        struct hfi1_packet packet;

        init_packet(rcd, &packet);
        hdrqtail = get_rcvhdrtail(rcd);
        if (packet.rhqoff == hdrqtail) {
                last = RCV_PKT_DONE;
                goto bail;
        }
        smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

        prescan_rxq(rcd, &packet);

        while (last == RCV_PKT_OK) {
                last = process_rcv_packet(&packet, thread);
                if (packet.rhqoff == hdrqtail)
                        last = RCV_PKT_DONE;
                process_rcv_update(last, &packet);
        }
        process_rcv_qp_work(&packet);
bail:
        finish_packet(&packet);
        return last;
}

static inline void set_all_nodma_rtail(struct hfi1_devdata *dd)
{
        int i;

        for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
                dd->rcd[i]->do_interrupt =
                        &handle_receive_interrupt_nodma_rtail;
}

static inline void set_all_dma_rtail(struct hfi1_devdata *dd)
{
        int i;

        for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
                dd->rcd[i]->do_interrupt =
                        &handle_receive_interrupt_dma_rtail;
}

void set_all_slowpath(struct hfi1_devdata *dd)
{
        int i;

        /* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
        for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
                dd->rcd[i]->do_interrupt = &handle_receive_interrupt;
}

static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
                                      struct hfi1_packet *packet,
                                      struct hfi1_devdata *dd)
{
        struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
        struct ib_header *hdr = hfi1_get_msgheader(packet->rcd->dd,
                                                   packet->rhf_addr);
        u8 etype = rhf_rcv_type(packet->rhf);

        if (etype == RHF_RCV_TYPE_IB && hdr2sc(hdr, packet->rhf) != 0xf) {
                int hwstate = read_logical_state(dd);

                if (hwstate != LSTATE_ACTIVE) {
                        dd_dev_info(dd, "Unexpected link state %d\n", hwstate);
                        return 0;
                }

                queue_work(rcd->ppd->hfi1_wq, lsaw);
                return 1;
        }
        return 0;
}

/*
 * handle_receive_interrupt - receive a packet
 * @rcd: the context
 * @thread: non-zero when called from a thread context (cond_resched()
 *          is then allowed between packets)
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler.
 */
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
{
        struct hfi1_devdata *dd = rcd->dd;
        u32 hdrqtail;
        int needset, last = RCV_PKT_OK;
        struct hfi1_packet packet;
        int skip_pkt = 0;

        /* Control context will always use the slow path interrupt handler */
        needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;

        init_packet(rcd, &packet);

        if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
                u32 seq = rhf_rcv_seq(packet.rhf);

                if (seq != rcd->seq_cnt) {
                        last = RCV_PKT_DONE;
                        goto bail;
                }
                hdrqtail = 0;
        } else {
                hdrqtail = get_rcvhdrtail(rcd);
                if (packet.rhqoff == hdrqtail) {
                        last = RCV_PKT_DONE;
                        goto bail;
                }
                smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

                /*
                 * Control context can potentially receive an invalid
                 * rhf. Drop such packets.
                 */
                if (rcd->ctxt == HFI1_CTRL_CTXT) {
                        u32 seq = rhf_rcv_seq(packet.rhf);

                        if (seq != rcd->seq_cnt)
                                skip_pkt = 1;
                }
        }

        prescan_rxq(rcd, &packet);

        while (last == RCV_PKT_OK) {
                if (unlikely(dd->do_drop &&
                             atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
                             DROP_PACKET_ON)) {
                        dd->do_drop = 0;

                        /* On to the next packet */
                        packet.rhqoff += packet.rsize;
                        packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
                                          packet.rhqoff +
                                          dd->rhf_offset;
                        packet.rhf = rhf_to_cpu(packet.rhf_addr);

                } else if (skip_pkt) {
                        last = skip_rcv_packet(&packet, thread);
                        skip_pkt = 0;
                } else {
                        /* Auto activate link on non-SC15 packet receive */
                        if (unlikely(rcd->ppd->host_link_state ==
                                     HLS_UP_ARMED) &&
                            set_armed_to_active(rcd, &packet, dd))
                                goto bail;
                        last = process_rcv_packet(&packet, thread);
                }

                if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
                        u32 seq = rhf_rcv_seq(packet.rhf);

                        if (++rcd->seq_cnt > 13)
                                rcd->seq_cnt = 1;
                        if (seq != rcd->seq_cnt)
                                last = RCV_PKT_DONE;
                        if (needset) {
                                dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n");
                                set_all_nodma_rtail(dd);
                                needset = 0;
                        }
                } else {
                        if (packet.rhqoff == hdrqtail)
                                last = RCV_PKT_DONE;
                        /*
                         * Control context can potentially receive an invalid
                         * rhf. Drop such packets.
                         */
                        if (rcd->ctxt == HFI1_CTRL_CTXT) {
                                u32 seq = rhf_rcv_seq(packet.rhf);

                                if (++rcd->seq_cnt > 13)
                                        rcd->seq_cnt = 1;
                                if (!last && (seq != rcd->seq_cnt))
                                        skip_pkt = 1;
                        }

                        if (needset) {
                                dd_dev_info(dd,
                                            "Switching to DMA_RTAIL\n");
                                set_all_dma_rtail(dd);
                                needset = 0;
                        }
                }

                process_rcv_update(last, &packet);
        }

        process_rcv_qp_work(&packet);

bail:
        /*
         * Always write head at end, and setup rcv interrupt, even
         * if no packets were processed.
         */
        finish_packet(&packet);
        return last;
}

/*
 * We may discover in the interrupt that the hardware link state has
 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
 * and we need to update the driver's notion of the link state.  We cannot
 * run set_link_state from interrupt context, so we queue this function on
 * a workqueue.
 *
 * We delay the regular interrupt processing until after the state changes
 * so that the link will be in the correct state by the time any application
 * we wake up attempts to send a reply to any message it received.
 * (Subsequent receive interrupts may possibly force the wakeup before we
 * update the link state.)
 *
 * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
 * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
 * so we're safe from use-after-free of the rcd.
 */
void receive_interrupt_work(struct work_struct *work)
{
        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
                                                  linkstate_active_work);
        struct hfi1_devdata *dd = ppd->dd;
        int i;

        /* Received non-SC15 packet implies neighbor_normal */
        ppd->neighbor_normal = 1;
        set_link_state(ppd, HLS_UP_ACTIVE);

        /*
         * Interrupt all kernel contexts that could have had an
         * interrupt during auto activation.
         */
        for (i = HFI1_CTRL_CTXT; i < dd->first_user_ctxt; i++)
                force_recv_intr(dd->rcd[i]);
}

/*
 * Convert a given MTU size to the on-wire MAD packet enumeration.
 * Return 'default_if_bad' if the size is invalid.
 */
int mtu_to_enum(u32 mtu, int default_if_bad)
{
        switch (mtu) {
        case     0: return OPA_MTU_0;
        case   256: return OPA_MTU_256;
        case   512: return OPA_MTU_512;
        case  1024: return OPA_MTU_1024;
        case  2048: return OPA_MTU_2048;
        case  4096: return OPA_MTU_4096;
        case  8192: return OPA_MTU_8192;
        case 10240: return OPA_MTU_10240;
        }
        return default_if_bad;
}

u16 enum_to_mtu(int mtu)
{
        switch (mtu) {
        case OPA_MTU_0:     return 0;
        case OPA_MTU_256:   return 256;
        case OPA_MTU_512:   return 512;
        case OPA_MTU_1024:  return 1024;
        case OPA_MTU_2048:  return 2048;
        case OPA_MTU_4096:  return 4096;
        case OPA_MTU_8192:  return 8192;
        case OPA_MTU_10240: return 10240;
        default: return 0xffff;
        }
}

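/*
 * Round-trip example: mtu_to_enum(4096, OPA_MTU_2048) yields OPA_MTU_4096
 * and enum_to_mtu(OPA_MTU_4096) yields 4096; an unsupported size such as
 * 3000 falls back to the supplied default (OPA_MTU_2048 here).
 */
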
/*
 * set_mtu - set the MTU
 * @ppd: the per port data
 *
 * We can handle "any" incoming size; the issue here is whether we
 * need to restrict our outgoing size.  We do not deal with what happens
 * to programs that are already running when the size changes.
 */
int set_mtu(struct hfi1_pportdata *ppd)
{
        struct hfi1_devdata *dd = ppd->dd;
        int i, drain, ret = 0, is_up = 0;

        ppd->ibmtu = 0;
        for (i = 0; i < ppd->vls_supported; i++)
                if (ppd->ibmtu < dd->vld[i].mtu)
                        ppd->ibmtu = dd->vld[i].mtu;
        ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);

        mutex_lock(&ppd->hls_lock);
        if (ppd->host_link_state == HLS_UP_INIT ||
            ppd->host_link_state == HLS_UP_ARMED ||
            ppd->host_link_state == HLS_UP_ACTIVE)
                is_up = 1;

        drain = !is_ax(dd) && is_up;

        if (drain)
                /*
                 * MTU is specified per-VL. To ensure that no packet gets
                 * stuck (due, e.g., to the MTU for the packet's VL being
                 * reduced), empty the per-VL FIFOs before adjusting MTU.
                 */
                ret = stop_drain_data_vls(dd);

        if (ret) {
                dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
                           __func__);
                goto err;
        }

        hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);

        if (drain)
                open_fill_data_vls(dd); /* reopen all VLs */

err:
        mutex_unlock(&ppd->hls_lock);

        return ret;
}

int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
{
        struct hfi1_devdata *dd = ppd->dd;

        ppd->lid = lid;
        ppd->lmc = lmc;
        hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);

        dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);

        return 0;
}

void shutdown_led_override(struct hfi1_pportdata *ppd)
{
        struct hfi1_devdata *dd = ppd->dd;

        /*
         * This pairs with the memory barrier in hfi1_start_led_override to
         * ensure that we read the correct state of LED beaconing represented
         * by led_override_timer_active
         */
        smp_rmb();
        if (atomic_read(&ppd->led_override_timer_active)) {
                del_timer_sync(&ppd->led_override_timer);
                atomic_set(&ppd->led_override_timer_active, 0);
                /* Ensure the atomic_set is visible to all CPUs */
                smp_wmb();
        }

        /* Hand control of the LED to the DC for normal operation */
        write_csr(dd, DCC_CFG_LED_CNTRL, 0);
}

static void run_led_override(unsigned long opaque)
{
        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)opaque;
        struct hfi1_devdata *dd = ppd->dd;
        unsigned long timeout;
        int phase_idx;

        if (!(dd->flags & HFI1_INITTED))
                return;

        phase_idx = ppd->led_override_phase & 1;

        setextled(dd, phase_idx);

        timeout = ppd->led_override_vals[phase_idx];

        /* Set up for next phase */
        ppd->led_override_phase = !ppd->led_override_phase;

        mod_timer(&ppd->led_override_timer, jiffies + timeout);
}

/*
 * To have the LED blink in a particular pattern, provide timeon and timeoff
 * in milliseconds.
 * To turn off custom blinking and return to normal operation, use
 * shutdown_led_override()
 */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
                             unsigned int timeoff)
{
        if (!(ppd->dd->flags & HFI1_INITTED))
                return;

        /* Convert to jiffies for direct use in timer */
        ppd->led_override_vals[0] = msecs_to_jiffies(timeoff);
        ppd->led_override_vals[1] = msecs_to_jiffies(timeon);

        /* Arbitrarily start from LED on phase */
        ppd->led_override_phase = 1;

        /*
         * If the timer has not already been started, do so. Use a "quick"
         * timeout so the handler will be called soon to look at our request.
         */
        if (!timer_pending(&ppd->led_override_timer)) {
                setup_timer(&ppd->led_override_timer, run_led_override,
                            (unsigned long)ppd);
                ppd->led_override_timer.expires = jiffies + 1;
                add_timer(&ppd->led_override_timer);
                atomic_set(&ppd->led_override_timer_active, 1);
                /* Ensure the atomic_set is visible to all CPUs */
                smp_wmb();
        }
}

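/*
 * Usage sketch: hfi1_start_led_override(ppd, 500, 500) blinks the LED
 * 500 ms on / 500 ms off until shutdown_led_override(ppd) hands control
 * back to the DC.
 */
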
/**
 * hfi1_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user contexts are open that use chip
 * resources.
 */
int hfi1_reset_device(int unit)
{
        int ret, i;
        struct hfi1_devdata *dd = hfi1_lookup(unit);
        struct hfi1_pportdata *ppd;
        unsigned long flags;
        int pidx;

        if (!dd) {
                ret = -ENODEV;
                goto bail;
        }

        dd_dev_info(dd, "Reset on unit %u requested\n", unit);

        if (!dd->kregbase || !(dd->flags & HFI1_PRESENT)) {
                dd_dev_info(dd,
                            "Invalid unit number %u or not initialized or not present\n",
                            unit);
                ret = -ENXIO;
                goto bail;
        }

        spin_lock_irqsave(&dd->uctxt_lock, flags);
        if (dd->rcd)
                for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
                        if (!dd->rcd[i] || !dd->rcd[i]->cnt)
                                continue;
                        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
                        ret = -EBUSY;
                        goto bail;
                }
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;

                shutdown_led_override(ppd);
        }
        if (dd->flags & HFI1_HAS_SEND_DMA)
                sdma_exit(dd);

        hfi1_reset_cpu_counters(dd);

        ret = hfi1_init(dd, 1);

        if (ret)
                dd_dev_err(dd,
                           "Reinitialize unit %u after reset failed with %d\n",
                           unit, ret);
        else
                dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
                            unit);

bail:
        return ret;
}

void handle_eflags(struct hfi1_packet *packet)
{
        struct hfi1_ctxtdata *rcd = packet->rcd;
        u32 rte = rhf_rcv_type_err(packet->rhf);

        rcv_hdrerr(rcd, rcd->ppd, packet);
        if (rhf_err_flags(packet->rhf))
                dd_dev_err(rcd->dd,
                           "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
                           rcd->ctxt, packet->rhf,
                           packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
                           packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
                           packet->rhf & RHF_DC_ERR ? "dc " : "",
                           packet->rhf & RHF_TID_ERR ? "tid " : "",
                           packet->rhf & RHF_LEN_ERR ? "len " : "",
                           packet->rhf & RHF_ECC_ERR ? "ecc " : "",
                           packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
                           packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
                           rte);
}

/*
 * The following functions are called by the interrupt handler. They are
 * type-specific handlers for each packet type.
 */
int process_receive_ib(struct hfi1_packet *packet)
{
        trace_hfi1_rcvhdr(packet->rcd->ppd->dd,
                          packet->rcd->ctxt,
                          rhf_err_flags(packet->rhf),
                          RHF_RCV_TYPE_IB,
                          packet->hlen,
                          packet->tlen,
                          packet->updegr,
                          rhf_egr_index(packet->rhf));

        if (unlikely(rhf_err_flags(packet->rhf))) {
                handle_eflags(packet);
                return RHF_RCV_CONTINUE;
        }

        hfi1_ib_rcv(packet);
        return RHF_RCV_CONTINUE;
}

int process_receive_bypass(struct hfi1_packet *packet)
{
        if (unlikely(rhf_err_flags(packet->rhf)))
                handle_eflags(packet);

        dd_dev_err(packet->rcd->dd,
                   "Bypass packets are not supported in normal operation. Dropping\n");
        incr_cntr64(&packet->rcd->dd->sw_rcv_bypass_packet_errors);
        return RHF_RCV_CONTINUE;
}

int process_receive_error(struct hfi1_packet *packet)
{
        handle_eflags(packet);

        if (unlikely(rhf_err_flags(packet->rhf)))
                dd_dev_err(packet->rcd->dd,
                           "Unhandled error packet received. Dropping.\n");

        return RHF_RCV_CONTINUE;
}

int kdeth_process_expected(struct hfi1_packet *packet)
{
        if (unlikely(rhf_err_flags(packet->rhf)))
                handle_eflags(packet);

        dd_dev_err(packet->rcd->dd,
                   "Unhandled expected packet received. Dropping.\n");
        return RHF_RCV_CONTINUE;
}

int kdeth_process_eager(struct hfi1_packet *packet)
{
        if (unlikely(rhf_err_flags(packet->rhf)))
                handle_eflags(packet);

        dd_dev_err(packet->rcd->dd,
                   "Unhandled eager packet received. Dropping.\n");
        return RHF_RCV_CONTINUE;
}

int process_receive_invalid(struct hfi1_packet *packet)
{
        dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
                   rhf_rcv_type(packet->rhf));
        return RHF_RCV_CONTINUE;
}