/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "sdma.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the initialization code.
 */
const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";

DEFINE_SPINLOCK(hfi1_devs_lock);
LIST_HEAD(hfi1_dev_list);
DEFINE_MUTEX(hfi1_mutex);	/* general driver use */

unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is 8192");

unsigned int hfi1_cu = 1;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");

unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
static int hfi1_caps_set(const char *, const struct kernel_param *);
static int hfi1_caps_get(char *, const struct kernel_param *);
static const struct kernel_param_ops cap_ops = {
	.set = hfi1_caps_set,
	.get = hfi1_caps_get
};
module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
MODULE_VERSION(HFI1_DRIVER_VERSION);

/*
 * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define MAX_PKT_RECV 64
#define EGR_HEAD_UPDATE_THRESHOLD 16

struct hfi1_ib_stats hfi1_stats;

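/*
 * Module parameter "set" callback for cap_mask: parse the requested value,
 * drop changes to non-writable and reserved bits, enforce the kernel/user
 * restrictions, and store the resulting capability mask.
 */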
static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
{
	int ret = 0;
	unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
		cap_mask = *cap_mask_ptr, value, diff,
		write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
			      HFI1_CAP_WRITABLE_MASK);

	ret = kstrtoul(val, 0, &value);
	if (ret) {
		pr_warn("Invalid module parameter value for 'cap_mask'\n");
		goto done;
	}
	/* Get the changed bits (except the locked bit) */
	diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);

	/* Remove any bits that are not allowed to change after driver load */
	if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
		pr_warn("Ignoring non-writable capability bits %#lx\n",
			diff & ~write_mask);
		diff &= write_mask;
	}

	/* Mask off any reserved bits */
	diff &= ~HFI1_CAP_RESERVED_MASK;
	/* Clear any previously set and changing bits */
	cap_mask &= ~diff;
	/* Update the bits with the new capability */
	cap_mask |= (value & diff);
	/* Check for any kernel/user restrictions */
	diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
		((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
	cap_mask &= ~diff;
	/* Set the bitmask to the final set */
	*cap_mask_ptr = cap_mask;
done:
	return ret;
}

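/*
 * Module parameter "get" callback for cap_mask: report the current mask,
 * with the locked bit cleared and kernel-to-user bits mirrored into the
 * user half.
 */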
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
{
	unsigned long cap_mask = *(unsigned long *)kp->arg;

	cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
	cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);

	return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}

const char *get_unit_name(int unit)
{
	static char iname[16];

	snprintf(iname, sizeof(iname), DRIVER_NAME "_%u", unit);
	return iname;
}

/*
 * Return count of units with at least one port ACTIVE.
 */
int hfi1_count_active_units(void)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	unsigned long flags;
	int pidx, nunits_active = 0;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	list_for_each_entry(dd, &hfi1_dev_list, list) {
		if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && ppd->linkup) {
				nunits_active++;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	return nunits_active;
}

/*
 * Return count of all units, optionally return in arguments
 * the number of usable (present) units, and the number of
 * ports that are up.
 */
int hfi1_count_units(int *npresentp, int *nupp)
{
	int nunits = 0, npresent = 0, nup = 0;
	struct hfi1_devdata *dd;
	unsigned long flags;
	int pidx;
	struct hfi1_pportdata *ppd;

	spin_lock_irqsave(&hfi1_devs_lock, flags);

	list_for_each_entry(dd, &hfi1_dev_list, list) {
		nunits++;
		if ((dd->flags & HFI1_PRESENT) && dd->kregbase)
			npresent++;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && ppd->linkup)
				nup++;
		}
	}

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;

	return nunits;
}

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
			       u8 *update)
{
	u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);

	*update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
	return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
			(offset * RCV_BUF_BLOCK_SIZE));
}

/*
 * Validate and encode a given RcvArray buffer size.
 * The function will check whether the given size falls within
 * allowed size ranges for the respective type and, optionally,
 * return the proper encoding.
 */
inline int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
{
	if (unlikely(!IS_ALIGNED(size, PAGE_SIZE)))
		return 0;
	if (unlikely(size < MIN_EAGER_BUFFER))
		return 0;
	if (size >
	    (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
		return 0;
	if (encoded)
		*encoded = ilog2(size / PAGE_SIZE) + 1;
	return 1;
}

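/*
 * Handle receive header errors: for TID errors on RC QPs, hand the header
 * to hfi1_rc_hdrerr() so a NAK can be scheduled; for opcode errors carrying
 * a CNP (pre-B0 hardware), process the congestion notification.
 */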
static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
		       struct hfi1_packet *packet)
{
	struct hfi1_message_header *rhdr = packet->hdr;
	u32 rte = rhf_rcv_type_err(packet->rhf);
	int lnh = be16_to_cpu(rhdr->lrh[0]) & 3;
	struct hfi1_ibport *ibp = &ppd->ibport_data;

	if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR))
		return;

	if (packet->rhf & RHF_TID_ERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		struct hfi1_ib_header *hdr = (struct hfi1_ib_header *)rhdr;
		struct hfi1_other_headers *ohdr = NULL;
		u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
		u16 lid = be16_to_cpu(hdr->lrh[1]);
		u32 qp_num;
		u32 rcv_flags = 0;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		/* Check for GRH */
		if (lnh == HFI1_LRH_BTH)
			ohdr = &hdr->u.oth;
		else if (lnh == HFI1_LRH_GRH) {
			u32 vtf;

			ohdr = &hdr->u.l.oth;
			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
			rcv_flags |= HFI1_HAS_GRH;
		} else
			goto drop;

		/* Get the destination QP number. */
		qp_num = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
		if (lid < HFI1_MULTICAST_LID_BASE) {
			struct hfi1_qp *qp;
			unsigned long flags;

			rcu_read_lock();
			qp = hfi1_lookup_qpn(ibp, qp_num);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock_irqsave(&qp->r_lock, flags);

			/* Check for valid receive state. */
			if (!(ib_hfi1_state_ops[qp->state] &
			      HFI1_PROCESS_RECV_OK)) {
				ibp->n_pkt_drops++;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				hfi1_rc_hdrerr(
					rcd,
					hdr,
					rcv_flags,
					qp);
				break;
			default:
				/* For now don't handle any other QP types */
				break;
			}

			spin_unlock_irqrestore(&qp->r_lock, flags);
			rcu_read_unlock();
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

	/* handle "RcvTypeErr" flags */
	switch (rte) {
	case RHF_RTE_ERROR_OP_CODE_ERR:
	{
		u32 opcode;
		void *ebuf = NULL;
		__be32 *bth = NULL;

		if (rhf_use_egr_bfr(packet->rhf))
			ebuf = packet->ebuf;

		if (ebuf == NULL)
			goto drop; /* this should never happen */

		if (lnh == HFI1_LRH_BTH)
			bth = (__be32 *)ebuf;
		else if (lnh == HFI1_LRH_GRH)
			bth = (__be32 *)((char *)ebuf + sizeof(struct ib_grh));
		else
			goto drop;

		opcode = be32_to_cpu(bth[0]) >> 24;
		opcode &= 0xff;

		if (opcode == IB_OPCODE_CNP) {
			/*
			 * Only in pre-B0 h/w is the CNP_OPCODE handled
			 * via this code path.
			 */
			struct hfi1_qp *qp = NULL;
			u32 lqpn, rqpn;
			u16 rlid;
			u8 svc_type, sl, sc5;

			sc5 = (be16_to_cpu(rhdr->lrh[0]) >> 12) & 0xf;
			if (rhf_dc_info(packet->rhf))
				sc5 |= 0x10;
			sl = ibp->sc_to_sl[sc5];

			lqpn = be32_to_cpu(bth[1]) & HFI1_QPN_MASK;
			rcu_read_lock();
			qp = hfi1_lookup_qpn(ibp, lqpn);
			if (qp == NULL) {
				rcu_read_unlock();
				goto drop;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_UD:
				rlid = 0;
				rqpn = 0;
				svc_type = IB_CC_SVCTYPE_UD;
				break;
			case IB_QPT_UC:
				rlid = be16_to_cpu(rhdr->lrh[3]);
				rqpn = qp->remote_qpn;
				svc_type = IB_CC_SVCTYPE_UC;
				break;
			default:
				goto drop;
			}

			process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
			rcu_read_unlock();
		}

		packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
		break;
	}
	default:
		break;
	}

drop:
	return;
}

static inline void init_packet(struct hfi1_ctxtdata *rcd,
			       struct hfi1_packet *packet)
{

	packet->rsize = rcd->rcvhdrqentsize; /* words */
	packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
	packet->rcd = rcd;
	packet->updegr = 0;
	packet->etail = -1;
	packet->rhf_addr = get_rhf_addr(rcd);
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
	packet->rhqoff = rcd->head;
	packet->numpkt = 0;
	packet->rcv_flags = 0;
}

#ifndef CONFIG_PRESCAN_RXQ
static void prescan_rxq(struct hfi1_packet *packet) {}
#else /* !CONFIG_PRESCAN_RXQ */
static int prescan_receive_queue;

static void process_ecn(struct hfi1_qp *qp, struct hfi1_ib_header *hdr,
			struct hfi1_other_headers *ohdr,
			u64 rhf, u32 bth1, struct ib_grh *grh)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u32 rqpn = 0;
	u16 rlid;
	u8 sc5, svc_type;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		rlid = be16_to_cpu(hdr->lrh[3]);
		rqpn = be32_to_cpu(ohdr->u.ud.deth[1]) & HFI1_QPN_MASK;
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	case IB_QPT_UC:
		rlid = qp->remote_ah_attr.dlid;
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_UC;
		break;
	case IB_QPT_RC:
		rlid = qp->remote_ah_attr.dlid;
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_RC;
		break;
	default:
		return;
	}

	sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
	if (rhf_dc_info(rhf))
		sc5 |= 0x10;

	if (bth1 & HFI1_FECN_SMASK) {
		u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
		u16 dlid = be16_to_cpu(hdr->lrh[1]);

		return_cnp(ibp, qp, rqpn, pkey, dlid, rlid, sc5, grh);
	}

	if (bth1 & HFI1_BECN_SMASK) {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
		u32 lqpn = bth1 & HFI1_QPN_MASK;
		u8 sl = ibp->sc_to_sl[sc5];

		process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
	}
}

struct ps_mdata {
	struct hfi1_ctxtdata *rcd;
	u32 rsize;
	u32 maxcnt;
	u32 ps_head;
	u32 ps_tail;
	u32 ps_seq;
};

static inline void init_ps_mdata(struct ps_mdata *mdata,
				 struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	mdata->rcd = rcd;
	mdata->rsize = packet->rsize;
	mdata->maxcnt = packet->maxcnt;
	mdata->ps_head = packet->rhqoff;

	if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
		mdata->ps_tail = get_rcvhdrtail(rcd);
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			mdata->ps_seq = rcd->seq_cnt;
		else
			mdata->ps_seq = 0; /* not used with DMA_RTAIL */
	} else {
		mdata->ps_tail = 0; /* used only with DMA_RTAIL */
		mdata->ps_seq = rcd->seq_cnt;
	}
}

static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
		return mdata->ps_head == mdata->ps_tail;
	return mdata->ps_seq != rhf_rcv_seq(rhf);
}

static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	/*
	 * Control context can potentially receive an invalid rhf.
	 * Drop such packets.
	 */
	if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
		return mdata->ps_seq != rhf_rcv_seq(rhf);

	return 0;
}

static inline void update_ps_mdata(struct ps_mdata *mdata,
				   struct hfi1_ctxtdata *rcd)
{
	mdata->ps_head += mdata->rsize;
	if (mdata->ps_head >= mdata->maxcnt)
		mdata->ps_head = 0;

	/* Control context must do seq counting */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
	    (rcd->ctxt == HFI1_CTRL_CTXT)) {
		if (++mdata->ps_seq > 13)
			mdata->ps_seq = 1;
	}
}

/*
 * prescan_rxq - search through the receive queue looking for packets
 * containing Explicit Congestion Notifications (FECNs, or BECNs).
 * When an ECN is found, process the Congestion Notification, and toggle
 * it off.
 */
static void prescan_rxq(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ps_mdata mdata;

	if (!prescan_receive_queue)
		return;

	init_ps_mdata(&mdata, packet);

	while (1) {
		struct hfi1_devdata *dd = rcd->dd;
		struct hfi1_ibport *ibp = &rcd->ppd->ibport_data;
		__le32 *rhf_addr = (__le32 *) rcd->rcvhdrq + mdata.ps_head +
					 dd->rhf_offset;
		struct hfi1_qp *qp;
		struct hfi1_ib_header *hdr;
		struct hfi1_other_headers *ohdr;
		struct ib_grh *grh = NULL;
		u64 rhf = rhf_to_cpu(rhf_addr);
		u32 etype = rhf_rcv_type(rhf), qpn, bth1;
		int is_ecn = 0;
		u8 lnh;

		if (ps_done(&mdata, rhf, rcd))
			break;

		if (ps_skip(&mdata, rhf, rcd))
			goto next;

		if (etype != RHF_RCV_TYPE_IB)
			goto next;

		hdr = (struct hfi1_ib_header *)
			hfi1_get_msgheader(dd, rhf_addr);
		lnh = be16_to_cpu(hdr->lrh[0]) & 3;

		if (lnh == HFI1_LRH_BTH)
			ohdr = &hdr->u.oth;
		else if (lnh == HFI1_LRH_GRH) {
			ohdr = &hdr->u.l.oth;
			grh = &hdr->u.l.grh;
		} else
			goto next; /* just in case */

		bth1 = be32_to_cpu(ohdr->bth[1]);
		is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK));

		if (!is_ecn)
			goto next;

		qpn = bth1 & HFI1_QPN_MASK;
		rcu_read_lock();
		qp = hfi1_lookup_qpn(ibp, qpn);

		if (qp == NULL) {
			rcu_read_unlock();
			goto next;
		}

		process_ecn(qp, hdr, ohdr, rhf, bth1, grh);
		rcu_read_unlock();

		/* turn off BECN, FECN */
		bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK);
		ohdr->bth[1] = cpu_to_be32(bth1);
next:
		update_ps_mdata(&mdata, rcd);
	}
}
#endif /* CONFIG_PRESCAN_RXQ */

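/*
 * Advance past the current packet without processing it, refreshing the
 * RHF for the next entry and yielding (or bailing with RCV_PKT_LIMIT)
 * every MAX_PKT_RECV packets.
 */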
static inline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret = RCV_PKT_OK;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	packet->numpkt++;
	if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)) {
		if (thread) {
			cond_resched();
		} else {
			ret = RCV_PKT_LIMIT;
			this_cpu_inc(*packet->rcd->dd->rcv_limit);
		}
	}

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
				     packet->rcd->dd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}

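/*
 * Process one packet: decode the RHF, prefetch any eager buffer, dispatch
 * to the type-specific receive handler, and set up for the next entry.
 */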
static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret = RCV_PKT_OK;

	packet->hdr = hfi1_get_msgheader(packet->rcd->dd,
					 packet->rhf_addr);
	packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
	packet->etype = rhf_rcv_type(packet->rhf);
	/* total length */
	packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
	/* retrieve eager buffer details */
	packet->ebuf = NULL;
	if (rhf_use_egr_bfr(packet->rhf)) {
		packet->etail = rhf_egr_index(packet->rhf);
		packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
					  &packet->updegr);
		/*
		 * Prefetch the contents of the eager buffer.  It is
		 * OK to send a negative length to prefetch_range().
		 * The +2 is the size of the RHF.
		 */
		prefetch_range(packet->ebuf,
			packet->tlen - ((packet->rcd->rcvhdrqentsize -
				  (rhf_hdrq_offset(packet->rhf)+2)) * 4));
	}

	/*
	 * Call a type specific handler for the packet. We
	 * should be able to trust that etype won't be beyond
	 * the range of valid indexes. If so something is really
	 * wrong and we can probably just let things come
	 * crashing down. There is no need to eat another
	 * comparison in this performance critical code.
	 */
	packet->rcd->dd->rhf_rcv_function_map[packet->etype](packet);
	packet->numpkt++;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)) {
		if (thread) {
			cond_resched();
		} else {
			ret = RCV_PKT_LIMIT;
			this_cpu_inc(*packet->rcd->dd->rcv_limit);
		}
	}

	packet->rhf_addr = (__le32 *) packet->rcd->rcvhdrq + packet->rhqoff +
				      packet->rcd->dd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}

static inline void process_rcv_update(int last, struct hfi1_packet *packet)
{
	/*
	 * Update head regs etc., every 16 packets, if not last pkt,
	 * to help prevent rcvhdrq overflows, when many packets
	 * are processed and queue is nearly full.
	 * Don't request an interrupt for intermediate updates.
	 */
	if (!last && !(packet->numpkt & 0xf)) {
		update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
			       packet->etail, 0, 0);
		packet->updegr = 0;
	}
	packet->rcv_flags = 0;
}

static inline void finish_packet(struct hfi1_packet *packet)
{

	/*
	 * Nothing we need to free for the packet.
	 *
	 * The only thing we need to do is a final update and call for an
	 * interrupt
	 */
	update_usrhead(packet->rcd, packet->rcd->head, packet->updegr,
		       packet->etail, rcv_intr_dynamic, packet->numpkt);

}

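/*
 * After the receive loop, service QPs that queued themselves on the
 * context's wait list: send deferred RC ACKs and reschedule pending sends.
 */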
static inline void process_rcv_qp_work(struct hfi1_packet *packet)
{

	struct hfi1_ctxtdata *rcd;
	struct hfi1_qp *qp, *nqp;

	rcd = packet->rcd;
	rcd->head = packet->rhqoff;

	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & HFI1_R_RSP_DEFERED_ACK) {
			qp->r_flags &= ~HFI1_R_RSP_DEFERED_ACK;
			hfi1_send_rc_ack(rcd, qp, 0);
		}
		if (qp->r_flags & HFI1_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~HFI1_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_hfi1_state_ops[qp->state] &
					HFI1_PROCESS_OR_FLUSH_SEND)
				hfi1_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/*
 * Handle receive interrupts when using the no dma rtail option.
 */
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	u32 seq;
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	seq = rhf_rcv_seq(packet.rhf);
	if (seq != rcd->seq_cnt) {
		last = RCV_PKT_DONE;
		goto bail;
	}

	prescan_rxq(&packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		seq = rhf_rcv_seq(packet.rhf);
		if (++rcd->seq_cnt > 13)
			rcd->seq_cnt = 1;
		if (seq != rcd->seq_cnt)
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
bail:
	finish_packet(&packet);
	return last;
}

int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	u32 hdrqtail;
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	hdrqtail = get_rcvhdrtail(rcd);
	if (packet.rhqoff == hdrqtail) {
		last = RCV_PKT_DONE;
		goto bail;
	}
	smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

	prescan_rxq(&packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		if (packet.rhqoff == hdrqtail)
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
bail:
	finish_packet(&packet);
	return last;
}

static inline void set_all_nodma_rtail(struct hfi1_devdata *dd)
{
	int i;

	for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
		dd->rcd[i]->do_interrupt =
			&handle_receive_interrupt_nodma_rtail;
}

static inline void set_all_dma_rtail(struct hfi1_devdata *dd)
{
	int i;

	for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
		dd->rcd[i]->do_interrupt =
			&handle_receive_interrupt_dma_rtail;
}

void set_all_slowpath(struct hfi1_devdata *dd)
{
	int i;

	/* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
	for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
		dd->rcd[i]->do_interrupt = &handle_receive_interrupt;
}

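/*
 * A non-SC15 packet has arrived while the link is ARMED; if the hardware
 * logical state is already ACTIVE, queue linkstate_active_work to move the
 * driver's link state to ACTIVE.  Returns 1 if the work was queued.
 */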
static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
				      struct hfi1_packet packet,
				      struct hfi1_devdata *dd)
{
	struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
	struct hfi1_message_header *hdr = hfi1_get_msgheader(packet.rcd->dd,
							     packet.rhf_addr);

	if (hdr2sc(hdr, packet.rhf) != 0xf) {
		int hwstate = read_logical_state(dd);

		if (hwstate != LSTATE_ACTIVE) {
			dd_dev_info(dd, "Unexpected link state %d\n", hwstate);
			return 0;
		}

		queue_work(rcd->ppd->hfi1_wq, lsaw);
		return 1;
	}
	return 0;
}

/*
 * handle_receive_interrupt - receive a packet
 * @rcd: the context
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler.
 */
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 hdrqtail;
	int needset, last = RCV_PKT_OK;
	struct hfi1_packet packet;
	int skip_pkt = 0;

	/* Control context will always use the slow path interrupt handler */
	needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;

	init_packet(rcd, &packet);

	if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
		u32 seq = rhf_rcv_seq(packet.rhf);

		if (seq != rcd->seq_cnt) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		hdrqtail = 0;
	} else {
		hdrqtail = get_rcvhdrtail(rcd);
		if (packet.rhqoff == hdrqtail) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

		/*
		 * Control context can potentially receive an invalid
		 * rhf. Drop such packets.
		 */
		if (rcd->ctxt == HFI1_CTRL_CTXT) {
			u32 seq = rhf_rcv_seq(packet.rhf);

			if (seq != rcd->seq_cnt)
				skip_pkt = 1;
		}
	}

	prescan_rxq(&packet);

	while (last == RCV_PKT_OK) {

		if (unlikely(dd->do_drop && atomic_xchg(&dd->drop_packet,
			DROP_PACKET_OFF) == DROP_PACKET_ON)) {
			dd->do_drop = 0;

			/* On to the next packet */
			packet.rhqoff += packet.rsize;
			packet.rhf_addr = (__le32 *) rcd->rcvhdrq +
					  packet.rhqoff +
					  dd->rhf_offset;
			packet.rhf = rhf_to_cpu(packet.rhf_addr);

		} else if (skip_pkt) {
			last = skip_rcv_packet(&packet, thread);
			skip_pkt = 0;
		} else {
			/* Auto activate link on non-SC15 packet receive */
			if (unlikely(rcd->ppd->host_link_state ==
				     HLS_UP_ARMED) &&
			    set_armed_to_active(rcd, packet, dd))
				goto bail;
			last = process_rcv_packet(&packet, thread);
		}

		if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
			u32 seq = rhf_rcv_seq(packet.rhf);

			if (++rcd->seq_cnt > 13)
				rcd->seq_cnt = 1;
			if (seq != rcd->seq_cnt)
				last = RCV_PKT_DONE;
			if (needset) {
				dd_dev_info(dd,
					"Switching to NO_DMA_RTAIL\n");
				set_all_nodma_rtail(dd);
				needset = 0;
			}
		} else {
			if (packet.rhqoff == hdrqtail)
				last = RCV_PKT_DONE;
			/*
			 * Control context can potentially receive an invalid
			 * rhf. Drop such packets.
			 */
			if (rcd->ctxt == HFI1_CTRL_CTXT) {
				u32 seq = rhf_rcv_seq(packet.rhf);

				if (++rcd->seq_cnt > 13)
					rcd->seq_cnt = 1;
				if (!last && (seq != rcd->seq_cnt))
					skip_pkt = 1;
			}

			if (needset) {
				dd_dev_info(dd,
					    "Switching to DMA_RTAIL\n");
				set_all_dma_rtail(dd);
				needset = 0;
			}
		}

		process_rcv_update(last, &packet);
	}

	process_rcv_qp_work(&packet);

bail:
	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	finish_packet(&packet);
	return last;
}

/*
 * We may discover in the interrupt that the hardware link state has
 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
 * and we need to update the driver's notion of the link state.  We cannot
 * run set_link_state from interrupt context, so we queue this function on
 * a workqueue.
 *
 * We delay the regular interrupt processing until after the state changes
 * so that the link will be in the correct state by the time any application
 * we wake up attempts to send a reply to any message it received.
 * (Subsequent receive interrupts may possibly force the wakeup before we
 * update the link state.)
 *
 * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
 * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
 * so we're safe from use-after-free of the rcd.
 */
void receive_interrupt_work(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  linkstate_active_work);
	struct hfi1_devdata *dd = ppd->dd;
	int i;

	/* Received non-SC15 packet implies neighbor_normal */
	ppd->neighbor_normal = 1;
	set_link_state(ppd, HLS_UP_ACTIVE);

	/*
	 * Interrupt all kernel contexts that could have had an
	 * interrupt during auto activation.
	 */
	for (i = HFI1_CTRL_CTXT; i < dd->first_user_ctxt; i++)
		force_recv_intr(dd->rcd[i]);
}

/*
 * Convert a given MTU size to the on-wire MAD packet enumeration.
 * Return 'default_if_bad' if the size is invalid.
 */
int mtu_to_enum(u32 mtu, int default_if_bad)
{
	switch (mtu) {
	case     0: return OPA_MTU_0;
	case   256: return OPA_MTU_256;
	case   512: return OPA_MTU_512;
	case  1024: return OPA_MTU_1024;
	case  2048: return OPA_MTU_2048;
	case  4096: return OPA_MTU_4096;
	case  8192: return OPA_MTU_8192;
	case 10240: return OPA_MTU_10240;
	}
	return default_if_bad;
}

u16 enum_to_mtu(int mtu)
{
	switch (mtu) {
	case OPA_MTU_0:     return 0;
	case OPA_MTU_256:   return 256;
	case OPA_MTU_512:   return 512;
	case OPA_MTU_1024:  return 1024;
	case OPA_MTU_2048:  return 2048;
	case OPA_MTU_4096:  return 4096;
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default: return 0xffff;
	}
}

/*
 * set_mtu - set the MTU
 * @ppd: the per port data
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  We do not deal with what happens
 * to programs that are already running when the size changes.
 */
int set_mtu(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	int i, drain, ret = 0, is_up = 0;

	ppd->ibmtu = 0;
	for (i = 0; i < ppd->vls_supported; i++)
		if (ppd->ibmtu < dd->vld[i].mtu)
			ppd->ibmtu = dd->vld[i].mtu;
	ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);

	mutex_lock(&ppd->hls_lock);
	if (ppd->host_link_state == HLS_UP_INIT
			|| ppd->host_link_state == HLS_UP_ARMED
			|| ppd->host_link_state == HLS_UP_ACTIVE)
		is_up = 1;

	drain = !is_ax(dd) && is_up;

	if (drain)
		/*
		 * MTU is specified per-VL. To ensure that no packet gets
		 * stuck (due, e.g., to the MTU for the packet's VL being
		 * reduced), empty the per-VL FIFOs before adjusting MTU.
		 */
		ret = stop_drain_data_vls(dd);

	if (ret) {
		dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
			   __func__);
		goto err;
	}

	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}

int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
{
	struct hfi1_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;
	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);

	dd_dev_info(dd, "IB%u:%u got a lid: 0x%x\n", dd->unit, ppd->port, lid);

	return 0;
}

/*
 * The following deal with the "obviously simple" task of overriding the state
 * of the LEDs, which normally indicate link physical and logical status.
 * The complications arise in dealing with different hardware mappings
 * and the board-dependent routine being called from interrupts,
 * and then there's the requirement to _flash_ them.
 */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)

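/*
 * Timer callback for the LED override: alternate between the two override
 * phases and re-arm the timer while an override is still requested.
 */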
static void run_led_override(unsigned long opaque)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)opaque;
	struct hfi1_devdata *dd = ppd->dd;
	int timeoff;
	int ph_idx;

	if (!(dd->flags & HFI1_INITTED))
		return;

	ph_idx = ppd->led_override_phase++ & 1;
	ppd->led_override = ppd->led_override_vals[ph_idx];
	timeoff = ppd->led_override_timeoff;

	/*
	 * don't re-fire the timer if user asked for it to be off; we let
	 * it fire one more time after they turn it off to simplify
	 */
	if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
		mod_timer(&ppd->led_override_timer, jiffies + timeoff);
}

void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int val)
{
	struct hfi1_devdata *dd = ppd->dd;
	int timeoff, freq;

	if (!(dd->flags & HFI1_INITTED))
		return;

	/* First check if we are blinking. If not, use 1HZ polling */
	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		/* For blink, set each phase from one nybble of val */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		/* Non-blink set both phases the same. */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = val & 0xF;
	}
	ppd->led_override_timeoff = timeoff;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the function will be called soon, to look at our request.
	 */
	if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
		/* Need to start timer */
		setup_timer(&ppd->led_override_timer, run_led_override,
			    (unsigned long)ppd);

		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
	} else {
		if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
			mod_timer(&ppd->led_override_timer, jiffies + 1);
		atomic_dec(&ppd->led_override_timer_active);
	}
}

/**
 * hfi1_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user contexts are open that use chip resources
 */
int hfi1_reset_device(int unit)
{
	int ret, i;
	struct hfi1_devdata *dd = hfi1_lookup(unit);
	struct hfi1_pportdata *ppd;
	unsigned long flags;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	dd_dev_info(dd, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase || !(dd->flags & HFI1_PRESENT)) {
		dd_dev_info(dd,
			"Invalid unit number %u or not initialized or not present\n",
			unit);
		ret = -ENXIO;
		goto bail;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd)
		for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
			if (!dd->rcd[i] || !dd->rcd[i]->cnt)
				continue;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			ret = -EBUSY;
			goto bail;
		}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (atomic_read(&ppd->led_override_timer_active)) {
			/* Need to stop LED timer, _then_ shut off LEDs */
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}

		/* Shut off LEDs after we are sure timer is not running */
		ppd->led_override = LED_OVER_BOTH_OFF;
	}
	if (dd->flags & HFI1_HAS_SEND_DMA)
		sdma_exit(dd);

	hfi1_reset_cpu_counters(dd);

	ret = hfi1_init(dd, 1);

	if (ret)
		dd_dev_err(dd,
			"Reinitialize unit %u after reset failed with %d\n",
			unit, ret);
	else
		dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
			unit);

bail:
	return ret;
}

void handle_eflags(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	u32 rte = rhf_rcv_type_err(packet->rhf);

	rcv_hdrerr(rcd, rcd->ppd, packet);
	if (rhf_err_flags(packet->rhf))
		dd_dev_err(rcd->dd,
			   "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
			   rcd->ctxt, packet->rhf,
			   packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
			   packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
			   packet->rhf & RHF_DC_ERR ? "dc " : "",
			   packet->rhf & RHF_TID_ERR ? "tid " : "",
			   packet->rhf & RHF_LEN_ERR ? "len " : "",
			   packet->rhf & RHF_ECC_ERR ? "ecc " : "",
			   packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
			   packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
			   rte);
}

/*
 * The following functions are called by the interrupt handler. They are type
 * specific handlers for each packet type.
 */
int process_receive_ib(struct hfi1_packet *packet)
{
	trace_hfi1_rcvhdr(packet->rcd->ppd->dd,
			  packet->rcd->ctxt,
			  rhf_err_flags(packet->rhf),
			  RHF_RCV_TYPE_IB,
			  packet->hlen,
			  packet->tlen,
			  packet->updegr,
			  rhf_egr_index(packet->rhf));

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return RHF_RCV_CONTINUE;
	}

	hfi1_ib_rcv(packet);
	return RHF_RCV_CONTINUE;
}

int process_receive_bypass(struct hfi1_packet *packet)
{
	if (unlikely(rhf_err_flags(packet->rhf)))
		handle_eflags(packet);

	dd_dev_err(packet->rcd->dd,
		   "Bypass packets are not supported in normal operation. Dropping\n");
	return RHF_RCV_CONTINUE;
}

int process_receive_error(struct hfi1_packet *packet)
{
	handle_eflags(packet);

	if (unlikely(rhf_err_flags(packet->rhf)))
		dd_dev_err(packet->rcd->dd,
			   "Unhandled error packet received. Dropping.\n");

	return RHF_RCV_CONTINUE;
}

int kdeth_process_expected(struct hfi1_packet *packet)
{
	if (unlikely(rhf_err_flags(packet->rhf)))
		handle_eflags(packet);

	dd_dev_err(packet->rcd->dd,
		   "Unhandled expected packet received. Dropping.\n");
	return RHF_RCV_CONTINUE;
}

int kdeth_process_eager(struct hfi1_packet *packet)
{
	if (unlikely(rhf_err_flags(packet->rhf)))
		handle_eflags(packet);

	dd_dev_err(packet->rcd->dd,
		   "Unhandled eager packet received. Dropping.\n");
	return RHF_RCV_CONTINUE;
}

int process_receive_invalid(struct hfi1_packet *packet)
{
	dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
		   rhf_rcv_type(packet->rhf));
	return RHF_RCV_CONTINUE;
}