drivers/infiniband/hw/hfi1/pio.c
1 /*
2 * Copyright(c) 2015, 2016 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48 #include <linux/delay.h>
49 #include "hfi.h"
50 #include "qp.h"
51 #include "trace.h"
52
53 #define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */
54
55 #define SC(name) SEND_CTXT_##name
56 /*
57 * Send Context functions
58 */
59 static void sc_wait_for_packet_egress(struct send_context *sc, int pause);
60
61 /*
62 * Set the CM reset bit and wait for it to clear. Use the provided
63 * sendctrl register. This routine has no locking.
64 */
65 void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
66 {
67 write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
68 while (1) {
69 udelay(1);
70 sendctrl = read_csr(dd, SEND_CTRL);
71 if ((sendctrl & SEND_CTRL_CM_RESET_SMASK) == 0)
72 break;
73 }
74 }
75
76 /* defined in header release 48 and higher */
77 #ifndef SEND_CTRL_UNSUPPORTED_VL_SHIFT
78 #define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
79 #define SEND_CTRL_UNSUPPORTED_VL_MASK 0xffull
80 #define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
81 << SEND_CTRL_UNSUPPORTED_VL_SHIFT)
82 #endif
83
84 /* global control of PIO send */
85 void pio_send_control(struct hfi1_devdata *dd, int op)
86 {
87 u64 reg, mask;
88 unsigned long flags;
89 int write = 1; /* write sendctrl back */
90 int flush = 0; /* re-read sendctrl to make sure it is flushed */
91
92 spin_lock_irqsave(&dd->sendctrl_lock, flags);
93
94 reg = read_csr(dd, SEND_CTRL);
95 switch (op) {
96 case PSC_GLOBAL_ENABLE:
97 reg |= SEND_CTRL_SEND_ENABLE_SMASK;
98 /* Fall through */
99 case PSC_DATA_VL_ENABLE:
100 /* Disallow sending on VLs not enabled */
101 mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
102 SEND_CTRL_UNSUPPORTED_VL_SHIFT;
103 reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
104 break;
105 case PSC_GLOBAL_DISABLE:
106 reg &= ~SEND_CTRL_SEND_ENABLE_SMASK;
107 break;
108 case PSC_GLOBAL_VLARB_ENABLE:
109 reg |= SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
110 break;
111 case PSC_GLOBAL_VLARB_DISABLE:
112 reg &= ~SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
113 break;
114 case PSC_CM_RESET:
115 __cm_reset(dd, reg);
116 write = 0; /* CSR already written (and flushed) */
117 break;
118 case PSC_DATA_VL_DISABLE:
119 reg |= SEND_CTRL_UNSUPPORTED_VL_SMASK;
120 flush = 1;
121 break;
122 default:
123 dd_dev_err(dd, "%s: invalid control %d\n", __func__, op);
124 break;
125 }
126
127 if (write) {
128 write_csr(dd, SEND_CTRL, reg);
129 if (flush)
130 (void)read_csr(dd, SEND_CTRL); /* flush write */
131 }
132
133 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
134 }
135
136 /* number of send context memory pools */
137 #define NUM_SC_POOLS 2
138
139 /* Send Context Size (SCS) wildcards */
140 #define SCS_POOL_0 -1
141 #define SCS_POOL_1 -2
142
143 /* Send Context Count (SCC) wildcards */
144 #define SCC_PER_VL -1
145 #define SCC_PER_CPU -2
146 #define SCC_PER_KRCVQ -3
147
148 /* Send Context Size (SCS) constants */
149 #define SCS_ACK_CREDITS 32
150 #define SCS_VL15_CREDITS 102 /* 3 pkts of 2048B data + 128B header */
151
152 #define PIO_THRESHOLD_CEILING 4096
153
154 #define PIO_WAIT_BATCH_SIZE 5
155
156 /* default send context sizes */
157 static struct sc_config_sizes sc_config_sizes[SC_MAX] = {
158 [SC_KERNEL] = { .size = SCS_POOL_0, /* even divide, pool 0 */
159 .count = SCC_PER_VL }, /* one per NUMA */
160 [SC_ACK] = { .size = SCS_ACK_CREDITS,
161 .count = SCC_PER_KRCVQ },
162 [SC_USER] = { .size = SCS_POOL_0, /* even divide, pool 0 */
163 .count = SCC_PER_CPU }, /* one per CPU */
164 [SC_VL15] = { .size = SCS_VL15_CREDITS,
165 .count = 1 },
166
167 };
168
169 /* send context memory pool configuration */
170 struct mem_pool_config {
171 int centipercent; /* % of memory, in 100ths of 1% */
172 int absolute_blocks; /* absolute block count */
173 };
174
175 /* default memory pool configuration: 100% in pool 0 */
176 static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = {
177 /* centi%, abs blocks */
178 { 10000, -1 }, /* pool 0 */
179 { 0, -1 }, /* pool 1 */
180 };
181
182 /* memory pool information, used when calculating final sizes */
183 struct mem_pool_info {
184 int centipercent; /*
185 * 100th of 1% of memory to use, -1 if blocks
186 * already set
187 */
188 int count; /* count of contexts in the pool */
189 int blocks; /* block size of the pool */
190 int size; /* context size, in blocks */
191 };
192
193 /*
194 * Convert a pool wildcard to a valid pool index. The wildcards
195 * start at -1 and increase negatively. Map them as:
196 * -1 => 0
197 * -2 => 1
198 * etc.
199 *
200 * Return -1 on non-wildcard input, otherwise convert to a pool number.
201 */
202 static int wildcard_to_pool(int wc)
203 {
204 if (wc >= 0)
205 return -1; /* non-wildcard */
206 return -wc - 1;
207 }
208
209 static const char *sc_type_names[SC_MAX] = {
210 "kernel",
211 "ack",
212 "user",
213 "vl15"
214 };
215
216 static const char *sc_type_name(int index)
217 {
218 if (index < 0 || index >= SC_MAX)
219 return "unknown";
220 return sc_type_names[index];
221 }
222
223 /*
224 * Read the send context memory pool configuration and send context
225 * size configuration. Replace any wildcards and come up with final
226 * counts and sizes for the send context types.
227 */
228 int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
229 {
230 struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
231 int total_blocks = (dd->chip_pio_mem_size / PIO_BLOCK_SIZE) - 1;
232 int total_contexts = 0;
233 int fixed_blocks;
234 int pool_blocks;
235 int used_blocks;
236 int cp_total; /* centipercent total */
237 int ab_total; /* absolute block total */
238 int extra;
239 int i;
240
241 /*
242 * When SDMA is enabled, kernel context pio packet size is capped by
243 * "piothreshold". Reduce pio buffer allocation for kernel context by
244 * setting it to a fixed size. The allocation allows 3-deep buffering
245 * of the largest pio packets plus up to 128 bytes header, sufficient
246 * to maintain verbs performance.
247 *
248 * When SDMA is disabled, keep the default pooling allocation.
249 */
250 if (HFI1_CAP_IS_KSET(SDMA)) {
251 u16 max_pkt_size = (piothreshold < PIO_THRESHOLD_CEILING) ?
252 piothreshold : PIO_THRESHOLD_CEILING;
253 sc_config_sizes[SC_KERNEL].size =
254 3 * (max_pkt_size + 128) / PIO_BLOCK_SIZE;
255 }
256
257 /*
258	 * Step 1:
259 * - copy the centipercents/absolute sizes from the pool config
260 * - sanity check these values
261 * - add up centipercents, then later check for full value
262 * - add up absolute blocks, then later check for over-commit
263 */
264 cp_total = 0;
265 ab_total = 0;
266 for (i = 0; i < NUM_SC_POOLS; i++) {
267 int cp = sc_mem_pool_config[i].centipercent;
268 int ab = sc_mem_pool_config[i].absolute_blocks;
269
270 /*
271 * A negative value is "unused" or "invalid". Both *can*
272 * be valid, but centipercent wins, so check that first
273 */
274 if (cp >= 0) { /* centipercent valid */
275 cp_total += cp;
276 } else if (ab >= 0) { /* absolute blocks valid */
277 ab_total += ab;
278 } else { /* neither valid */
279 dd_dev_err(
280 dd,
281 "Send context memory pool %d: both the block count and centipercent are invalid\n",
282 i);
283 return -EINVAL;
284 }
285
286 mem_pool_info[i].centipercent = cp;
287 mem_pool_info[i].blocks = ab;
288 }
289
290 /* do not use both % and absolute blocks for different pools */
291 if (cp_total != 0 && ab_total != 0) {
292 dd_dev_err(
293 dd,
294 "All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n");
295 return -EINVAL;
296 }
297
298 /* if any percentages are present, they must add up to 100% x 100 */
299 if (cp_total != 0 && cp_total != 10000) {
300 dd_dev_err(
301 dd,
302 "Send context memory pool centipercent is %d, expecting 10000\n",
303 cp_total);
304 return -EINVAL;
305 }
306
307 /* the absolute pool total cannot be more than the mem total */
308 if (ab_total > total_blocks) {
309 dd_dev_err(
310 dd,
311 "Send context memory pool absolute block count %d is larger than the memory size %d\n",
312 ab_total, total_blocks);
313 return -EINVAL;
314 }
315
316 /*
317 * Step 2:
318 * - copy from the context size config
319 * - replace context type wildcard counts with real values
320 * - add up non-memory pool block sizes
321 * - add up memory pool user counts
322 */
323 fixed_blocks = 0;
324 for (i = 0; i < SC_MAX; i++) {
325 int count = sc_config_sizes[i].count;
326 int size = sc_config_sizes[i].size;
327 int pool;
328
329 /*
330 * Sanity check count: Either a positive value or
331 * one of the expected wildcards is valid. The positive
332 * value is checked later when we compare against total
333 * memory available.
334 */
335 if (i == SC_ACK) {
336 count = dd->n_krcv_queues;
337 } else if (i == SC_KERNEL) {
338 count = INIT_SC_PER_VL * num_vls;
339 } else if (count == SCC_PER_CPU) {
340 count = dd->num_rcv_contexts - dd->n_krcv_queues;
341 } else if (count < 0) {
342 dd_dev_err(
343 dd,
344 "%s send context invalid count wildcard %d\n",
345 sc_type_name(i), count);
346 return -EINVAL;
347 }
348 if (total_contexts + count > dd->chip_send_contexts)
349 count = dd->chip_send_contexts - total_contexts;
350
351 total_contexts += count;
352
353 /*
354 * Sanity check pool: The conversion will return a pool
355 * number or -1 if a fixed (non-negative) value. The fixed
356 * value is checked later when we compare against
357 * total memory available.
358 */
359 pool = wildcard_to_pool(size);
360 if (pool == -1) { /* non-wildcard */
361 fixed_blocks += size * count;
362 } else if (pool < NUM_SC_POOLS) { /* valid wildcard */
363 mem_pool_info[pool].count += count;
364 } else { /* invalid wildcard */
365 dd_dev_err(
366 dd,
367 "%s send context invalid pool wildcard %d\n",
368 sc_type_name(i), size);
369 return -EINVAL;
370 }
371
372 dd->sc_sizes[i].count = count;
373 dd->sc_sizes[i].size = size;
374 }
375 if (fixed_blocks > total_blocks) {
376 dd_dev_err(
377 dd,
378 "Send context fixed block count, %u, larger than total block count %u\n",
379 fixed_blocks, total_blocks);
380 return -EINVAL;
381 }
382
383 /* step 3: calculate the blocks in the pools, and pool context sizes */
384 pool_blocks = total_blocks - fixed_blocks;
385 if (ab_total > pool_blocks) {
386 dd_dev_err(
387 dd,
388 "Send context fixed pool sizes, %u, larger than pool block count %u\n",
389 ab_total, pool_blocks);
390 return -EINVAL;
391 }
392 /* subtract off the fixed pool blocks */
393 pool_blocks -= ab_total;
394
395 for (i = 0; i < NUM_SC_POOLS; i++) {
396 struct mem_pool_info *pi = &mem_pool_info[i];
397
398 /* % beats absolute blocks */
399 if (pi->centipercent >= 0)
400 pi->blocks = (pool_blocks * pi->centipercent) / 10000;
401
402 if (pi->blocks == 0 && pi->count != 0) {
403 dd_dev_err(
404 dd,
405 "Send context memory pool %d has %u contexts, but no blocks\n",
406 i, pi->count);
407 return -EINVAL;
408 }
409 if (pi->count == 0) {
410 /* warn about wasted blocks */
411 if (pi->blocks != 0)
412 dd_dev_err(
413 dd,
414 "Send context memory pool %d has %u blocks, but zero contexts\n",
415 i, pi->blocks);
416 pi->size = 0;
417 } else {
418 pi->size = pi->blocks / pi->count;
419 }
420 }
421
422 /* step 4: fill in the context type sizes from the pool sizes */
423 used_blocks = 0;
424 for (i = 0; i < SC_MAX; i++) {
425 if (dd->sc_sizes[i].size < 0) {
426 unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);
427
428 WARN_ON_ONCE(pool >= NUM_SC_POOLS);
429 dd->sc_sizes[i].size = mem_pool_info[pool].size;
430 }
431 /* make sure we are not larger than what is allowed by the HW */
432 #define PIO_MAX_BLOCKS 1024
433 if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
434 dd->sc_sizes[i].size = PIO_MAX_BLOCKS;
435
436 /* calculate our total usage */
437 used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
438 }
439 extra = total_blocks - used_blocks;
440 if (extra != 0)
441 dd_dev_info(dd, "unused send context blocks: %d\n", extra);
442
443 return total_contexts;
444 }
445
446 int init_send_contexts(struct hfi1_devdata *dd)
447 {
448 u16 base;
449 int ret, i, j, context;
450
451 ret = init_credit_return(dd);
452 if (ret)
453 return ret;
454
455 dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
456 GFP_KERNEL);
457 dd->send_contexts = kcalloc(dd->num_send_contexts,
458 sizeof(struct send_context_info),
459 GFP_KERNEL);
460 if (!dd->send_contexts || !dd->hw_to_sw) {
461 kfree(dd->hw_to_sw);
462 kfree(dd->send_contexts);
463 free_credit_return(dd);
464 return -ENOMEM;
465 }
466
467 /* hardware context map starts with invalid send context indices */
468 for (i = 0; i < TXE_NUM_CONTEXTS; i++)
469 dd->hw_to_sw[i] = INVALID_SCI;
470
471 /*
472 * All send contexts have their credit sizes. Allocate credits
473 * for each context one after another from the global space.
474 */
475 context = 0;
476 base = 1;
477 for (i = 0; i < SC_MAX; i++) {
478 struct sc_config_sizes *scs = &dd->sc_sizes[i];
479
480 for (j = 0; j < scs->count; j++) {
481 struct send_context_info *sci =
482 &dd->send_contexts[context];
483 sci->type = i;
484 sci->base = base;
485 sci->credits = scs->size;
486
487 context++;
488 base += scs->size;
489 }
490 }
491
492 return 0;
493 }
494
495 /*
496 * Allocate a software index and hardware context of the given type.
497 *
498 * Must be called with dd->sc_lock held.
499 */
500 static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
501 u32 *hw_context)
502 {
503 struct send_context_info *sci;
504 u32 index;
505 u32 context;
506
507 for (index = 0, sci = &dd->send_contexts[0];
508 index < dd->num_send_contexts; index++, sci++) {
509 if (sci->type == type && sci->allocated == 0) {
510 sci->allocated = 1;
511 /* use a 1:1 mapping, but make them non-equal */
512 context = dd->chip_send_contexts - index - 1;
513 dd->hw_to_sw[context] = index;
514 *sw_index = index;
515 *hw_context = context;
516 return 0; /* success */
517 }
518 }
519 dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
520 return -ENOSPC;
521 }
522
523 /*
524 * Free the send context given by its software index.
525 *
526 * Must be called with dd->sc_lock held.
527 */
528 static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
529 {
530 struct send_context_info *sci;
531
532 sci = &dd->send_contexts[sw_index];
533 if (!sci->allocated) {
534 dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
535 __func__, sw_index, hw_context);
536 }
537 sci->allocated = 0;
538 dd->hw_to_sw[hw_context] = INVALID_SCI;
539 }
540
541 /* return the base context of a context in a group */
542 static inline u32 group_context(u32 context, u32 group)
543 {
544 return (context >> group) << group;
545 }
546
547 /* return the size of a group */
548 static inline u32 group_size(u32 group)
549 {
550 return 1 << group;
551 }
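/*
 * Illustrative example (values chosen for exposition only): with
 * group == 2, hardware contexts 8..11 form a single group of
 * group_size(2) == 4 contexts, and group_context() maps each of
 * them to the base context 8.
 */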
552
553 /*
554 * Obtain the credit return addresses, kernel virtual and bus, for the
555 * given sc.
556 *
557 * To understand this routine:
558 * o va and dma are arrays of struct credit_return. One for each physical
559 * send context, per NUMA.
560 * o Each send context always looks in its relative location in a struct
561 * credit_return for its credit return.
562 * o Each send context in a group must have its return address CSR programmed
563 * with the same value. Use the address of the first send context in the
564 * group.
565 */
566 static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
567 {
568 u32 gc = group_context(sc->hw_context, sc->group);
569 u32 index = sc->hw_context & 0x7;
570
571 sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
572 *dma = (unsigned long)
573 &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
574 }
575
576 /*
577 * Work queue function triggered in error interrupt routine for
578 * kernel contexts.
579 */
580 static void sc_halted(struct work_struct *work)
581 {
582 struct send_context *sc;
583
584 sc = container_of(work, struct send_context, halt_work);
585 sc_restart(sc);
586 }
587
588 /*
589 * Calculate PIO block threshold for this send context using the given MTU.
590 * Trigger a return when one MTU plus optional header of credits remain.
591 *
592 * Parameter mtu is in bytes.
593 * Parameter hdrqentsize is in DWORDs.
594 *
595 * Return value is what to write into the CSR: trigger return when
596 * unreturned credits pass this count.
597 */
598 u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
599 {
600 u32 release_credits;
601 u32 threshold;
602
603 /* add in the header size, then divide by the PIO block size */
604 mtu += hdrqentsize << 2;
605 release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);
606
607 /* check against this context's credits */
608 if (sc->credits <= release_credits)
609 threshold = 1;
610 else
611 threshold = sc->credits - release_credits;
612
613 return threshold;
614 }
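/*
 * Worked example (illustrative values, assuming a 64-byte PIO block):
 * mtu = 8192 bytes and hdrqentsize = 32 DWORDs give 8192 + 128 = 8320
 * bytes, so release_credits = DIV_ROUND_UP(8320, 64) = 130 blocks.  A
 * context with 160 credits then gets a threshold of 160 - 130 = 30,
 * while a context with 130 or fewer credits falls back to a threshold
 * of 1.
 */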
615
616 /*
617 * Calculate credit threshold in terms of percent of the allocated credits.
618 * Trigger when unreturned credits equal or exceed the percentage of the whole.
619 *
620 * Return value is what to write into the CSR: trigger return when
621 * unreturned credits pass this count.
622 */
623 u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
624 {
625 return (sc->credits * percent) / 100;
626 }
627
628 /*
629 * Set the credit return threshold.
630 */
631 void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
632 {
633 unsigned long flags;
634 u32 old_threshold;
635 int force_return = 0;
636
637 spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
638
639 old_threshold = (sc->credit_ctrl >>
640 SC(CREDIT_CTRL_THRESHOLD_SHIFT))
641 & SC(CREDIT_CTRL_THRESHOLD_MASK);
642
643 if (new_threshold != old_threshold) {
644 sc->credit_ctrl =
645 (sc->credit_ctrl
646 & ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
647 | ((new_threshold
648 & SC(CREDIT_CTRL_THRESHOLD_MASK))
649 << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
650 write_kctxt_csr(sc->dd, sc->hw_context,
651 SC(CREDIT_CTRL), sc->credit_ctrl);
652
653 /* force a credit return on change to avoid a possible stall */
654 force_return = 1;
655 }
656
657 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
658
659 if (force_return)
660 sc_return_credits(sc);
661 }
662
663 /*
664 * set_pio_integrity
665 *
666 * Set the CHECK_ENABLE register for the send context 'sc'.
667 */
668 void set_pio_integrity(struct send_context *sc)
669 {
670 struct hfi1_devdata *dd = sc->dd;
671 u32 hw_context = sc->hw_context;
672 int type = sc->type;
673
674 write_kctxt_csr(dd, hw_context,
675 SC(CHECK_ENABLE),
676 hfi1_pkt_default_send_ctxt_mask(dd, type));
677 }
678
679 static u32 get_buffers_allocated(struct send_context *sc)
680 {
681 int cpu;
682 u32 ret = 0;
683
684 for_each_possible_cpu(cpu)
685 ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
686 return ret;
687 }
688
689 static void reset_buffers_allocated(struct send_context *sc)
690 {
691 int cpu;
692
693 for_each_possible_cpu(cpu)
694 (*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
695 }
696
697 /*
698 * Allocate a NUMA relative send context structure of the given type along
699 * with a HW context.
700 */
701 struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
702 uint hdrqentsize, int numa)
703 {
704 struct send_context_info *sci;
705 struct send_context *sc = NULL;
706 dma_addr_t dma;
707 unsigned long flags;
708 u64 reg;
709 u32 thresh;
710 u32 sw_index;
711 u32 hw_context;
712 int ret;
713 u8 opval, opmask;
714
715 /* do not allocate while frozen */
716 if (dd->flags & HFI1_FROZEN)
717 return NULL;
718
719 sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa);
720 if (!sc)
721 return NULL;
722
723 sc->buffers_allocated = alloc_percpu(u32);
724 if (!sc->buffers_allocated) {
725 kfree(sc);
726 dd_dev_err(dd,
727 "Cannot allocate buffers_allocated per cpu counters\n"
728 );
729 return NULL;
730 }
731
732 spin_lock_irqsave(&dd->sc_lock, flags);
733 ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
734 if (ret) {
735 spin_unlock_irqrestore(&dd->sc_lock, flags);
736 free_percpu(sc->buffers_allocated);
737 kfree(sc);
738 return NULL;
739 }
740
741 sci = &dd->send_contexts[sw_index];
742 sci->sc = sc;
743
744 sc->dd = dd;
745 sc->node = numa;
746 sc->type = type;
747 spin_lock_init(&sc->alloc_lock);
748 spin_lock_init(&sc->release_lock);
749 spin_lock_init(&sc->credit_ctrl_lock);
750 INIT_LIST_HEAD(&sc->piowait);
751 INIT_WORK(&sc->halt_work, sc_halted);
752 init_waitqueue_head(&sc->halt_wait);
753
754 /* grouping is always single context for now */
755 sc->group = 0;
756
757 sc->sw_index = sw_index;
758 sc->hw_context = hw_context;
759 cr_group_addresses(sc, &dma);
760 sc->credits = sci->credits;
761 sc->size = sc->credits * PIO_BLOCK_SIZE;
762
763 /* PIO Send Memory Address details */
764 #define PIO_ADDR_CONTEXT_MASK 0xfful
765 #define PIO_ADDR_CONTEXT_SHIFT 16
766 sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
767 << PIO_ADDR_CONTEXT_SHIFT);
768
769 /* set base and credits */
770 reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK))
771 << SC(CTRL_CTXT_DEPTH_SHIFT))
772 | ((sci->base & SC(CTRL_CTXT_BASE_MASK))
773 << SC(CTRL_CTXT_BASE_SHIFT));
774 write_kctxt_csr(dd, hw_context, SC(CTRL), reg);
775
776 set_pio_integrity(sc);
777
778 /* unmask all errors */
779 write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1);
780
781 /* set the default partition key */
782 write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
783 (SC(CHECK_PARTITION_KEY_VALUE_MASK) &
784 DEFAULT_PKEY) <<
785 SC(CHECK_PARTITION_KEY_VALUE_SHIFT));
786
787 /* per context type checks */
788 if (type == SC_USER) {
789 opval = USER_OPCODE_CHECK_VAL;
790 opmask = USER_OPCODE_CHECK_MASK;
791 } else {
792 opval = OPCODE_CHECK_VAL_DISABLED;
793 opmask = OPCODE_CHECK_MASK_DISABLED;
794 }
795
796 /* set the send context check opcode mask and value */
797 write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
798 ((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
799 ((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));
800
801 /* set up credit return */
802 reg = dma & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
803 write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);
804
805 /*
806 * Calculate the initial credit return threshold.
807 *
808 * For Ack contexts, set a threshold for half the credits.
809 * For User contexts use the given percentage. This has been
810 * sanitized on driver start-up.
811 * For Kernel contexts, use the default MTU plus a header
812 * or half the credits, whichever is smaller. This should
813 * work for both the 3-deep buffering allocation and the
814 * pooling allocation.
815 */
816 if (type == SC_ACK) {
817 thresh = sc_percent_to_threshold(sc, 50);
818 } else if (type == SC_USER) {
819 thresh = sc_percent_to_threshold(sc,
820 user_credit_return_threshold);
821 } else { /* kernel */
822 thresh = min(sc_percent_to_threshold(sc, 50),
823 sc_mtu_to_threshold(sc, hfi1_max_mtu,
824 hdrqentsize));
825 }
826 reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);
827 /* add in early return */
828 if (type == SC_USER && HFI1_CAP_IS_USET(EARLY_CREDIT_RETURN))
829 reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
830 else if (HFI1_CAP_IS_KSET(EARLY_CREDIT_RETURN)) /* kernel, ack */
831 reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
832
833 /* set up write-through credit_ctrl */
834 sc->credit_ctrl = reg;
835 write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg);
836
837 /* User send contexts should not allow sending on VL15 */
838 if (type == SC_USER) {
839 reg = 1ULL << 15;
840 write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg);
841 }
842
843 spin_unlock_irqrestore(&dd->sc_lock, flags);
844
845 /*
846 * Allocate shadow ring to track outstanding PIO buffers _after_
847 * unlocking. We don't know the size until the lock is held and
848 * we can't allocate while the lock is held. No one is using
849 * the context yet, so allocate it now.
850 *
851 * User contexts do not get a shadow ring.
852 */
853 if (type != SC_USER) {
854 /*
855 * Size the shadow ring 1 larger than the number of credits
856 * so head == tail can mean empty.
857 */
858 sc->sr_size = sci->credits + 1;
859 sc->sr = kzalloc_node(sizeof(union pio_shadow_ring) *
860 sc->sr_size, GFP_KERNEL, numa);
861 if (!sc->sr) {
862 sc_free(sc);
863 return NULL;
864 }
865 }
866
867 hfi1_cdbg(PIO,
868 "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n",
869 sw_index,
870 hw_context,
871 sc_type_name(type),
872 sc->group,
873 sc->credits,
874 sc->credit_ctrl,
875 thresh);
876
877 return sc;
878 }
879
880 /* free a per-NUMA send context structure */
881 void sc_free(struct send_context *sc)
882 {
883 struct hfi1_devdata *dd;
884 unsigned long flags;
885 u32 sw_index;
886 u32 hw_context;
887
888 if (!sc)
889 return;
890
891 sc->flags |= SCF_IN_FREE; /* ensure no restarts */
892 dd = sc->dd;
893 if (!list_empty(&sc->piowait))
894 dd_dev_err(dd, "piowait list not empty!\n");
895 sw_index = sc->sw_index;
896 hw_context = sc->hw_context;
897 sc_disable(sc); /* make sure the HW is disabled */
898 flush_work(&sc->halt_work);
899
900 spin_lock_irqsave(&dd->sc_lock, flags);
901 dd->send_contexts[sw_index].sc = NULL;
902
903 /* clear/disable all registers set in sc_alloc */
904 write_kctxt_csr(dd, hw_context, SC(CTRL), 0);
905 write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0);
906 write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0);
907 write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0);
908 write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0);
909 write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0);
910 write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0);
911
912 /* release the index and context for re-use */
913 sc_hw_free(dd, sw_index, hw_context);
914 spin_unlock_irqrestore(&dd->sc_lock, flags);
915
916 kfree(sc->sr);
917 free_percpu(sc->buffers_allocated);
918 kfree(sc);
919 }
920
921 /* disable the context */
922 void sc_disable(struct send_context *sc)
923 {
924 u64 reg;
925 unsigned long flags;
926 struct pio_buf *pbuf;
927
928 if (!sc)
929 return;
930
931 /* do all steps, even if already disabled */
932 spin_lock_irqsave(&sc->alloc_lock, flags);
933 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
934 reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
935 sc->flags &= ~SCF_ENABLED;
936 sc_wait_for_packet_egress(sc, 1);
937 write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
938 spin_unlock_irqrestore(&sc->alloc_lock, flags);
939
940 /*
941 * Flush any waiters. Once the context is disabled,
942 * credit return interrupts are stopped (although there
943 * could be one in-process when the context is disabled).
944 * Wait one microsecond for any lingering interrupts, then
945 * proceed with the flush.
946 */
947 udelay(1);
948 spin_lock_irqsave(&sc->release_lock, flags);
949 if (sc->sr) { /* this context has a shadow ring */
950 while (sc->sr_tail != sc->sr_head) {
951 pbuf = &sc->sr[sc->sr_tail].pbuf;
952 if (pbuf->cb)
953 (*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
954 sc->sr_tail++;
955 if (sc->sr_tail >= sc->sr_size)
956 sc->sr_tail = 0;
957 }
958 }
959 spin_unlock_irqrestore(&sc->release_lock, flags);
960 }
961
962 /* return SendEgressCtxtStatus.PacketOccupancy */
963 #define packet_occupancy(r) \
964 (((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\
965 >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT)
966
967 /* is egress halted on the context? */
968 #define egress_halted(r) \
969 ((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK)
970
971 /* wait for packet egress, optionally pause for credit return */
972 static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
973 {
974 struct hfi1_devdata *dd = sc->dd;
975 u64 reg = 0;
976 u64 reg_prev;
977 u32 loop = 0;
978
979 while (1) {
980 reg_prev = reg;
981 reg = read_csr(dd, sc->hw_context * 8 +
982 SEND_EGRESS_CTXT_STATUS);
983 /* done if egress is stopped */
984 if (egress_halted(reg))
985 break;
986 reg = packet_occupancy(reg);
987 if (reg == 0)
988 break;
989 /* counter is reset if occupancy count changes */
990 if (reg != reg_prev)
991 loop = 0;
992 if (loop > 50000) {
993 /* timed out - bounce the link */
994 dd_dev_err(dd,
995 "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
996 __func__, sc->sw_index,
997 sc->hw_context, (u32)reg);
998 queue_work(dd->pport->hfi1_wq,
999 &dd->pport->link_bounce_work);
1000 break;
1001 }
1002 loop++;
1003 udelay(1);
1004 }
1005
1006 if (pause)
1007 /* Add additional delay to ensure chip returns all credits */
1008 pause_for_credit_return(dd);
1009 }
1010
1011 void sc_wait(struct hfi1_devdata *dd)
1012 {
1013 int i;
1014
1015 for (i = 0; i < dd->num_send_contexts; i++) {
1016 struct send_context *sc = dd->send_contexts[i].sc;
1017
1018 if (!sc)
1019 continue;
1020 sc_wait_for_packet_egress(sc, 0);
1021 }
1022 }
1023
1024 /*
1025 * Restart a context after it has been halted due to error.
1026 *
1027	 * If the first step (waiting for the halt to be asserted) fails, return early.
1028	 * Otherwise, complain about timeouts but keep going.
1029 *
1030 * It is expected that allocations (enabled flag bit) have been shut off
1031 * already (only applies to kernel contexts).
1032 */
1033 int sc_restart(struct send_context *sc)
1034 {
1035 struct hfi1_devdata *dd = sc->dd;
1036 u64 reg;
1037 u32 loop;
1038 int count;
1039
1040 /* bounce off if not halted, or being free'd */
1041 if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
1042 return -EINVAL;
1043
1044 dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
1045 sc->hw_context);
1046
1047 /*
1048 * Step 1: Wait for the context to actually halt.
1049 *
1050 * The error interrupt is asynchronous to actually setting halt
1051 * on the context.
1052 */
1053 loop = 0;
1054 while (1) {
1055 reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
1056 if (reg & SC(STATUS_CTXT_HALTED_SMASK))
1057 break;
1058 if (loop > 100) {
1059 dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
1060 __func__, sc->sw_index, sc->hw_context);
1061 return -ETIME;
1062 }
1063 loop++;
1064 udelay(1);
1065 }
1066
1067 /*
1068 * Step 2: Ensure no users are still trying to write to PIO.
1069 *
1070 * For kernel contexts, we have already turned off buffer allocation.
1071 * Now wait for the buffer count to go to zero.
1072 *
1073 * For user contexts, the user handling code has cut off write access
1074 * to the context's PIO pages before calling this routine and will
1075 * restore write access after this routine returns.
1076 */
1077 if (sc->type != SC_USER) {
1078 /* kernel context */
1079 loop = 0;
1080 while (1) {
1081 count = get_buffers_allocated(sc);
1082 if (count == 0)
1083 break;
1084 if (loop > 100) {
1085 dd_dev_err(dd,
1086 "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
1087 __func__, sc->sw_index,
1088 sc->hw_context, count);
1089 }
1090 loop++;
1091 udelay(1);
1092 }
1093 }
1094
1095 /*
1096 * Step 3: Wait for all packets to egress.
1097 * This is done while disabling the send context
1098 *
1099 * Step 4: Disable the context
1100 *
1101 * This is a superset of the halt. After the disable, the
1102 * errors can be cleared.
1103 */
1104 sc_disable(sc);
1105
1106 /*
1107 * Step 5: Enable the context
1108 *
1109 * This enable will clear the halted flag and per-send context
1110 * error flags.
1111 */
1112 return sc_enable(sc);
1113 }
1114
1115 /*
1116 * PIO freeze processing. To be called after the TXE block is fully frozen.
1117 * Go through all frozen send contexts and disable them. The contexts are
1118 * already stopped by the freeze.
1119 */
1120 void pio_freeze(struct hfi1_devdata *dd)
1121 {
1122 struct send_context *sc;
1123 int i;
1124
1125 for (i = 0; i < dd->num_send_contexts; i++) {
1126 sc = dd->send_contexts[i].sc;
1127 /*
1128 * Don't disable unallocated, unfrozen, or user send contexts.
1129 * User send contexts will be disabled when the process
1130 * calls into the driver to reset its context.
1131 */
1132 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
1133 continue;
1134
1135 /* only need to disable, the context is already stopped */
1136 sc_disable(sc);
1137 }
1138 }
1139
1140 /*
1141 * Unfreeze PIO for kernel send contexts. The precondition for calling this
1142 * is that all PIO send contexts have been disabled and the SPC freeze has
1143 * been cleared. Now perform the last step and re-enable each kernel context.
1144 * User (PSM) processing will occur when PSM calls into the kernel to
1145 * acknowledge the freeze.
1146 */
1147 void pio_kernel_unfreeze(struct hfi1_devdata *dd)
1148 {
1149 struct send_context *sc;
1150 int i;
1151
1152 for (i = 0; i < dd->num_send_contexts; i++) {
1153 sc = dd->send_contexts[i].sc;
1154 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
1155 continue;
1156
1157 sc_enable(sc); /* will clear the sc frozen flag */
1158 }
1159 }
1160
1161 /*
1162 * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
1163 * Returns:
1164 * -ETIMEDOUT - if we wait too long
1165 * -EIO - if there was an error
1166 */
1167 static int pio_init_wait_progress(struct hfi1_devdata *dd)
1168 {
1169 u64 reg;
1170 int max, count = 0;
1171
1172 /* max is the longest possible HW init time / delay */
1173 max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
1174 while (1) {
1175 reg = read_csr(dd, SEND_PIO_INIT_CTXT);
1176 if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))
1177 break;
1178 if (count >= max)
1179 return -ETIMEDOUT;
1180 udelay(5);
1181 count++;
1182 }
1183
1184 return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0;
1185 }
1186
1187 /*
1188 * Reset all of the send contexts to their power-on state. Used
1189 * only during manual init - no lock against sc_enable needed.
1190 */
1191 void pio_reset_all(struct hfi1_devdata *dd)
1192 {
1193 int ret;
1194
1195 /* make sure the init engine is not busy */
1196 ret = pio_init_wait_progress(dd);
1197 /* ignore any timeout */
1198 if (ret == -EIO) {
1199 /* clear the error */
1200 write_csr(dd, SEND_PIO_ERR_CLEAR,
1201 SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
1202 }
1203
1204 /* reset init all */
1205 write_csr(dd, SEND_PIO_INIT_CTXT,
1206 SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
1207 udelay(2);
1208 ret = pio_init_wait_progress(dd);
1209 if (ret < 0) {
1210 dd_dev_err(dd,
1211 "PIO send context init %s while initializing all PIO blocks\n",
1212 ret == -ETIMEDOUT ? "is stuck" : "had an error");
1213 }
1214 }
1215
1216 /* enable the context */
1217 int sc_enable(struct send_context *sc)
1218 {
1219 u64 sc_ctrl, reg, pio;
1220 struct hfi1_devdata *dd;
1221 unsigned long flags;
1222 int ret = 0;
1223
1224 if (!sc)
1225 return -EINVAL;
1226 dd = sc->dd;
1227
1228 /*
1229 * Obtain the allocator lock to guard against any allocation
1230 * attempts (which should not happen prior to context being
1231 * enabled). On the release/disable side we don't need to
1232 * worry about locking since the releaser will not do anything
1233 * if the context accounting values have not changed.
1234 */
1235 spin_lock_irqsave(&sc->alloc_lock, flags);
1236 sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
1237 if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK)))
1238 goto unlock; /* already enabled */
1239
1240 /* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */
1241
1242 *sc->hw_free = 0;
1243 sc->free = 0;
1244 sc->alloc_free = 0;
1245 sc->fill = 0;
1246 sc->fill_wrap = 0;
1247 sc->sr_head = 0;
1248 sc->sr_tail = 0;
1249 sc->flags = 0;
1250	        /* the alloc lock ensures no fast path allocation */
1251 reset_buffers_allocated(sc);
1252
1253 /*
1254 * Clear all per-context errors. Some of these will be set when
1255 * we are re-enabling after a context halt. Now that the context
1256 * is disabled, the halt will not clear until after the PIO init
1257 * engine runs below.
1258 */
1259 reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
1260 if (reg)
1261 write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);
1262
1263 /*
1264 * The HW PIO initialization engine can handle only one init
1265 * request at a time. Serialize access to each device's engine.
1266 */
1267 spin_lock(&dd->sc_init_lock);
1268 /*
1269 * Since access to this code block is serialized and
1270 * each access waits for the initialization to complete
1271 * before releasing the lock, the PIO initialization engine
1272 * should not be in use, so we don't have to wait for the
1273 * InProgress bit to go down.
1274 */
1275 pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) <<
1276 SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT) |
1277 SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK;
1278 write_csr(dd, SEND_PIO_INIT_CTXT, pio);
1279 /*
1280 * Wait until the engine is done. Give the chip the required time
1281 * so, hopefully, we read the register just once.
1282 */
1283 udelay(2);
1284 ret = pio_init_wait_progress(dd);
1285 spin_unlock(&dd->sc_init_lock);
1286 if (ret) {
1287 dd_dev_err(dd,
1288 "sctxt%u(%u): Context not enabled due to init failure %d\n",
1289 sc->sw_index, sc->hw_context, ret);
1290 goto unlock;
1291 }
1292
1293 /*
1294 * All is well. Enable the context.
1295 */
1296 sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK);
1297 write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);
1298 /*
1299 * Read SendCtxtCtrl to force the write out and prevent a timing
1300 * hazard where a PIO write may reach the context before the enable.
1301 */
1302 read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
1303 sc->flags |= SCF_ENABLED;
1304
1305 unlock:
1306 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1307
1308 return ret;
1309 }
1310
1311 /* force a credit return on the context */
1312 void sc_return_credits(struct send_context *sc)
1313 {
1314 if (!sc)
1315 return;
1316
1317 /* a 0->1 transition schedules a credit return */
1318 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
1319 SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
1320 /*
1321 * Ensure that the write is flushed and the credit return is
1322 * scheduled. We care more about the 0 -> 1 transition.
1323 */
1324 read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
1325 /* set back to 0 for next time */
1326 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
1327 }
1328
1329 /* allow all in-flight packets to drain on the context */
1330 void sc_flush(struct send_context *sc)
1331 {
1332 if (!sc)
1333 return;
1334
1335 sc_wait_for_packet_egress(sc, 1);
1336 }
1337
1338 /* drop all packets on the context, no waiting until they are sent */
1339 void sc_drop(struct send_context *sc)
1340 {
1341 if (!sc)
1342 return;
1343
1344 dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
1345 __func__, sc->sw_index, sc->hw_context);
1346 }
1347
1348 /*
1349 * Start the software reaction to a context halt or SPC freeze:
1350 * - mark the context as halted or frozen
1351 * - stop buffer allocations
1352 *
1353 * Called from the error interrupt. Other work is deferred until
1354 * out of the interrupt.
1355 */
1356 void sc_stop(struct send_context *sc, int flag)
1357 {
1358 unsigned long flags;
1359
1360 /* mark the context */
1361 sc->flags |= flag;
1362
1363 /* stop buffer allocations */
1364 spin_lock_irqsave(&sc->alloc_lock, flags);
1365 sc->flags &= ~SCF_ENABLED;
1366 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1367 wake_up(&sc->halt_wait);
1368 }
1369
1370 #define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32))
1371 #define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS)
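/*
 * Illustrative example only (assumes a 64-byte PIO block, i.e.
 * BLOCK_DWORDS == 16): a 70-dword packet, PBC included, needs
 * dwords_to_blocks(70) == 5 send credits (blocks).
 */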
1372
1373 /*
1374 * The send context buffer "allocator".
1375 *
1376 * @sc: the PIO send context we are allocating from
1377 * @len: length of whole packet - including PBC - in dwords
1378 * @cb: optional callback to call when the buffer is finished sending
1379 * @arg: argument for cb
1380 *
1381 * Return a pointer to a PIO buffer if successful, NULL if not enough room.
1382 */
1383 struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
1384 pio_release_cb cb, void *arg)
1385 {
1386 struct pio_buf *pbuf = NULL;
1387 unsigned long flags;
1388 unsigned long avail;
1389 unsigned long blocks = dwords_to_blocks(dw_len);
1390 u32 fill_wrap;
1391 int trycount = 0;
1392 u32 head, next;
1393
1394 spin_lock_irqsave(&sc->alloc_lock, flags);
1395 if (!(sc->flags & SCF_ENABLED)) {
1396 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1397 goto done;
1398 }
1399
1400 retry:
1401 avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free);
1402 if (blocks > avail) {
1403 /* not enough room */
1404 if (unlikely(trycount)) { /* already tried to get more room */
1405 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1406 goto done;
1407 }
1408 /* copy from receiver cache line and recalculate */
1409 sc->alloc_free = ACCESS_ONCE(sc->free);
1410 avail =
1411 (unsigned long)sc->credits -
1412 (sc->fill - sc->alloc_free);
1413 if (blocks > avail) {
1414 /* still no room, actively update */
1415 sc_release_update(sc);
1416 sc->alloc_free = ACCESS_ONCE(sc->free);
1417 trycount++;
1418 goto retry;
1419 }
1420 }
1421
1422 /* there is enough room */
1423
1424 preempt_disable();
1425 this_cpu_inc(*sc->buffers_allocated);
1426
1427 /* read this once */
1428 head = sc->sr_head;
1429
1430 /* "allocate" the buffer */
1431 sc->fill += blocks;
1432 fill_wrap = sc->fill_wrap;
1433 sc->fill_wrap += blocks;
1434 if (sc->fill_wrap >= sc->credits)
1435 sc->fill_wrap = sc->fill_wrap - sc->credits;
1436
1437 /*
1438 * Fill the parts that the releaser looks at before moving the head.
1439 * The only necessary piece is the sent_at field. The credits
1440 * we have just allocated cannot have been returned yet, so the
1441 * cb and arg will not be looked at for a "while". Put them
1442 * on this side of the memory barrier anyway.
1443 */
1444 pbuf = &sc->sr[head].pbuf;
1445 pbuf->sent_at = sc->fill;
1446 pbuf->cb = cb;
1447 pbuf->arg = arg;
1448 pbuf->sc = sc; /* could be filled in at sc->sr init time */
1449 /* make sure this is in memory before updating the head */
1450
1451 /* calculate next head index, do not store */
1452 next = head + 1;
1453 if (next >= sc->sr_size)
1454 next = 0;
1455 /*
1456 * update the head - must be last! - the releaser can look at fields
1457 * in pbuf once we move the head
1458 */
1459 smp_wmb();
1460 sc->sr_head = next;
1461 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1462
1463 /* finish filling in the buffer outside the lock */
1464 pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE;
1465 pbuf->end = sc->base_addr + sc->size;
1466 pbuf->qw_written = 0;
1467 pbuf->carry_bytes = 0;
1468 pbuf->carry.val64 = 0;
1469 done:
1470 return pbuf;
1471 }
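/*
 * Typical caller pattern (a sketch, not taken verbatim from this file):
 * a sender calls sc_buffer_alloc() and, on a NULL return, queues itself
 * on sc->piowait and enables the credit return interrupt via
 * hfi1_sc_wantpiobuf_intr(sc, 1) so a later sc_piobufavail() can wake
 * it; on success it copies the PBC plus payload into the returned pbuf
 * and relies on the cb/arg pair for completion handling.
 */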
1472
1473 /*
1474 * There are at least two entities that can turn on credit return
1475 * interrupts and they can overlap. Avoid problems by implementing
1476 * a count scheme that is enforced by a lock. The lock is needed because
1477 * the count and CSR write must be paired.
1478 */
1479
1480 /*
1481 * Start credit return interrupts. This is managed by a count. If already
1482 * on, just increment the count.
1483 */
1484 void sc_add_credit_return_intr(struct send_context *sc)
1485 {
1486 unsigned long flags;
1487
1488 /* lock must surround both the count change and the CSR update */
1489 spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
1490 if (sc->credit_intr_count == 0) {
1491 sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
1492 write_kctxt_csr(sc->dd, sc->hw_context,
1493 SC(CREDIT_CTRL), sc->credit_ctrl);
1494 }
1495 sc->credit_intr_count++;
1496 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
1497 }
1498
1499 /*
1500 * Stop credit return interrupts. This is managed by a count. Decrement the
1501 * count, if the last user, then turn the credit interrupts off.
1502 */
1503 void sc_del_credit_return_intr(struct send_context *sc)
1504 {
1505 unsigned long flags;
1506
1507 WARN_ON(sc->credit_intr_count == 0);
1508
1509 /* lock must surround both the count change and the CSR update */
1510 spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
1511 sc->credit_intr_count--;
1512 if (sc->credit_intr_count == 0) {
1513 sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
1514 write_kctxt_csr(sc->dd, sc->hw_context,
1515 SC(CREDIT_CTRL), sc->credit_ctrl);
1516 }
1517 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
1518 }
1519
1520 /*
1521	 * The caller must be careful when calling this: every call with needint
1522	 * set must eventually be paired with a call with needint clear.
1523 */
1524 void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
1525 {
1526 if (needint)
1527 sc_add_credit_return_intr(sc);
1528 else
1529 sc_del_credit_return_intr(sc);
1530 trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
1531 if (needint) {
1532 mmiowb();
1533 sc_return_credits(sc);
1534 }
1535 }
1536
1537 /**
1538 * sc_piobufavail - callback when a PIO buffer is available
1539 * @sc: the send context
1540 *
1541 * This is called from the interrupt handler when a PIO buffer is
1542 * available after hfi1_verbs_send() returned an error that no buffers were
1543 * available. Disable the interrupt if there are no more QPs waiting.
1544 */
1545 static void sc_piobufavail(struct send_context *sc)
1546 {
1547 struct hfi1_devdata *dd = sc->dd;
1548 struct hfi1_ibdev *dev = &dd->verbs_dev;
1549 struct list_head *list;
1550 struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
1551 struct rvt_qp *qp;
1552 struct hfi1_qp_priv *priv;
1553 unsigned long flags;
1554 unsigned i, n = 0;
1555
1556 if (dd->send_contexts[sc->sw_index].type != SC_KERNEL &&
1557 dd->send_contexts[sc->sw_index].type != SC_VL15)
1558 return;
1559 list = &sc->piowait;
1560 /*
1561 * Note: checking that the piowait list is empty and clearing
1562 * the buffer available interrupt needs to be atomic or we
1563 * could end up with QPs on the wait list with the interrupt
1564 * disabled.
1565 */
1566 write_seqlock_irqsave(&dev->iowait_lock, flags);
1567 while (!list_empty(list)) {
1568 struct iowait *wait;
1569
1570 if (n == ARRAY_SIZE(qps))
1571 break;
1572 wait = list_first_entry(list, struct iowait, list);
1573 qp = iowait_to_qp(wait);
1574 priv = qp->priv;
1575 list_del_init(&priv->s_iowait.list);
1576 priv->s_iowait.lock = NULL;
1577 /* refcount held until actual wake up */
1578 qps[n++] = qp;
1579 }
1580 /*
1581	 * If there had been waiters and there are more,
1582	 * ensure that we redo the force to avoid a potential hang.
1583 */
1584 if (n) {
1585 hfi1_sc_wantpiobuf_intr(sc, 0);
1586 if (!list_empty(list))
1587 hfi1_sc_wantpiobuf_intr(sc, 1);
1588 }
1589 write_sequnlock_irqrestore(&dev->iowait_lock, flags);
1590
1591 for (i = 0; i < n; i++)
1592 hfi1_qp_wakeup(qps[i],
1593 RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN);
1594 }
1595
1596 /* translate a send credit update to a bit code of reasons */
1597 static inline int fill_code(u64 hw_free)
1598 {
1599 int code = 0;
1600
1601 if (hw_free & CR_STATUS_SMASK)
1602 code |= PRC_STATUS_ERR;
1603 if (hw_free & CR_CREDIT_RETURN_DUE_TO_PBC_SMASK)
1604 code |= PRC_PBC;
1605 if (hw_free & CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK)
1606 code |= PRC_THRESHOLD;
1607 if (hw_free & CR_CREDIT_RETURN_DUE_TO_ERR_SMASK)
1608 code |= PRC_FILL_ERR;
1609 if (hw_free & CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK)
1610 code |= PRC_SC_DISABLE;
1611 return code;
1612 }
1613
1614 /* use the jiffies compare to get the wrap right */
1615 #define sent_before(a, b) time_before(a, b) /* a < b */
1616
1617 /*
1618 * The send context buffer "releaser".
1619 */
1620 void sc_release_update(struct send_context *sc)
1621 {
1622 struct pio_buf *pbuf;
1623 u64 hw_free;
1624 u32 head, tail;
1625 unsigned long old_free;
1626 unsigned long free;
1627 unsigned long extra;
1628 unsigned long flags;
1629 int code;
1630
1631 if (!sc)
1632 return;
1633
1634 spin_lock_irqsave(&sc->release_lock, flags);
1635 /* update free */
1636 hw_free = le64_to_cpu(*sc->hw_free); /* volatile read */
1637 old_free = sc->free;
1638 extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
1639 - (old_free & CR_COUNTER_MASK))
1640 & CR_COUNTER_MASK;
1641 free = old_free + extra;
1642 trace_hfi1_piofree(sc, extra);
1643
1644 /* call sent buffer callbacks */
1645 code = -1; /* code not yet set */
1646 head = ACCESS_ONCE(sc->sr_head); /* snapshot the head */
1647 tail = sc->sr_tail;
1648 while (head != tail) {
1649 pbuf = &sc->sr[tail].pbuf;
1650
1651 if (sent_before(free, pbuf->sent_at)) {
1652 /* not sent yet */
1653 break;
1654 }
1655 if (pbuf->cb) {
1656 if (code < 0) /* fill in code on first user */
1657 code = fill_code(hw_free);
1658 (*pbuf->cb)(pbuf->arg, code);
1659 }
1660
1661 tail++;
1662 if (tail >= sc->sr_size)
1663 tail = 0;
1664 }
1665 sc->sr_tail = tail;
1666 /* make sure tail is updated before free */
1667 smp_wmb();
1668 sc->free = free;
1669 spin_unlock_irqrestore(&sc->release_lock, flags);
1670 sc_piobufavail(sc);
1671 }
1672
1673 /*
1674 * Send context group releaser. Argument is the send context that caused
1675 * the interrupt. Called from the send context interrupt handler.
1676 *
1677 * Call release on all contexts in the group.
1678 *
1679 * This routine takes the sc_lock without an irqsave because it is only
1680 * called from an interrupt handler. Adjust if that changes.
1681 */
1682 void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
1683 {
1684 struct send_context *sc;
1685 u32 sw_index;
1686 u32 gc, gc_end;
1687
1688 spin_lock(&dd->sc_lock);
1689 sw_index = dd->hw_to_sw[hw_context];
1690 if (unlikely(sw_index >= dd->num_send_contexts)) {
1691 dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
1692 __func__, hw_context, sw_index);
1693 goto done;
1694 }
1695 sc = dd->send_contexts[sw_index].sc;
1696 if (unlikely(!sc))
1697 goto done;
1698
1699 gc = group_context(hw_context, sc->group);
1700 gc_end = gc + group_size(sc->group);
1701 for (; gc < gc_end; gc++) {
1702 sw_index = dd->hw_to_sw[gc];
1703 if (unlikely(sw_index >= dd->num_send_contexts)) {
1704 dd_dev_err(dd,
1705 "%s: invalid hw (%u) to sw (%u) mapping\n",
1706 __func__, hw_context, sw_index);
1707 continue;
1708 }
1709 sc_release_update(dd->send_contexts[sw_index].sc);
1710 }
1711 done:
1712 spin_unlock(&dd->sc_lock);
1713 }
1714
1715 /*
1716 * pio_select_send_context_vl() - select send context
1717 * @dd: devdata
1718 * @selector: a spreading factor
1719 * @vl: this vl
1720 *
1721 * This function returns a send context based on the selector and a vl.
1722 * The mapping fields are protected by RCU
1723 */
1724 struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
1725 u32 selector, u8 vl)
1726 {
1727 struct pio_vl_map *m;
1728 struct pio_map_elem *e;
1729 struct send_context *rval;
1730
1731 /*
1732	 * NOTE: This should only happen if SC->VL changed after the initial
1733	 * checks on the QP/AH.
1734	 * The default below will return VL0's send context.
1735 */
1736 if (unlikely(vl >= num_vls)) {
1737 rval = NULL;
1738 goto done;
1739 }
1740
1741 rcu_read_lock();
1742 m = rcu_dereference(dd->pio_map);
1743 if (unlikely(!m)) {
1744 rcu_read_unlock();
1745 return dd->vld[0].sc;
1746 }
1747 e = m->map[vl & m->mask];
1748 rval = e->ksc[selector & e->mask];
1749 rcu_read_unlock();
1750
1751 done:
1752 rval = !rval ? dd->vld[0].sc : rval;
1753 return rval;
1754 }
1755
1756 /*
1757 * pio_select_send_context_sc() - select send context
1758 * @dd: devdata
1759 * @selector: a spreading factor
1760 * @sc5: the 5 bit sc
1761 *
1762	 * This function returns a send context based on the selector and an sc.
1763 */
1764 struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
1765 u32 selector, u8 sc5)
1766 {
1767 u8 vl = sc_to_vlt(dd, sc5);
1768
1769 return pio_select_send_context_vl(dd, selector, vl);
1770 }
1771
1772 /*
1773 * Free the indicated map struct
1774 */
1775 static void pio_map_free(struct pio_vl_map *m)
1776 {
1777 int i;
1778
1779 for (i = 0; m && i < m->actual_vls; i++)
1780 kfree(m->map[i]);
1781 kfree(m);
1782 }
1783
1784 /*
1785 * Handle RCU callback
1786 */
1787 static void pio_map_rcu_callback(struct rcu_head *list)
1788 {
1789 struct pio_vl_map *m = container_of(list, struct pio_vl_map, list);
1790
1791 pio_map_free(m);
1792 }
1793
1794 /*
1795 * Set credit return threshold for the kernel send context
1796 */
1797 static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
1798 {
1799 u32 thres;
1800
1801 thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
1802 50),
1803 sc_mtu_to_threshold(dd->kernel_send_context[scontext],
1804 dd->vld[i].mtu,
1805 dd->rcd[0]->rcvhdrqentsize));
1806 sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
1807 }
1808
1809 /*
1810 * pio_map_init - called when #vls change
1811 * @dd: hfi1_devdata
1812 * @port: port number
1813 * @num_vls: number of vls
1814 * @vl_scontexts: per vl send context mapping (optional)
1815 *
1816 * This routine changes the mapping based on the number of vls.
1817 *
1818 * vl_scontexts is used to specify a non-uniform vl/send context
1819 * loading. NULL implies auto computing the loading and giving each
1820	 * VL a uniform distribution of send contexts per VL.
1821	 *
1822	 * The auto algorithm computes the sc_per_vl and the number of extra
1823	 * send contexts. Any extra send contexts are added from the last VL
1824	 * on down.
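 * For example (illustrative numbers only): 16 kernel send contexts
 * spread over 5 VLs gives sc_per_vl = 3 and extra = 1, so the computed
 * loading is vl_scontexts[] = { 3, 3, 3, 3, 4 }.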
1825 *
1826 * rcu locking is used here to control access to the mapping fields.
1827 *
1828 * If either the num_vls or num_send_contexts are non-power of 2, the
1829 * array sizes in the struct pio_vl_map and the struct pio_map_elem are
1830 * rounded up to the next highest power of 2 and the first entry is
1831 * reused in a round robin fashion.
1832 *
1833	 * If an error occurs, the map change is not done and the mapping is not
1834	 * changed.
1835 *
1836 */
1837 int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
1838 {
1839 int i, j;
1840 int extra, sc_per_vl;
1841 int scontext = 1;
1842 int num_kernel_send_contexts = 0;
1843 u8 lvl_scontexts[OPA_MAX_VLS];
1844 struct pio_vl_map *oldmap, *newmap;
1845
1846 if (!vl_scontexts) {
1847 for (i = 0; i < dd->num_send_contexts; i++)
1848 if (dd->send_contexts[i].type == SC_KERNEL)
1849 num_kernel_send_contexts++;
1850 /* truncate divide */
1851 sc_per_vl = num_kernel_send_contexts / num_vls;
1852 /* extras */
1853 extra = num_kernel_send_contexts % num_vls;
1854 vl_scontexts = lvl_scontexts;
1855 /* add extras from last vl down */
1856 for (i = num_vls - 1; i >= 0; i--, extra--)
1857 vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0);
1858 }
1859 /* build new map */
1860 newmap = kzalloc(sizeof(*newmap) +
1861 roundup_pow_of_two(num_vls) *
1862 sizeof(struct pio_map_elem *),
1863 GFP_KERNEL);
1864 if (!newmap)
1865 goto bail;
1866 newmap->actual_vls = num_vls;
1867 newmap->vls = roundup_pow_of_two(num_vls);
1868 newmap->mask = (1 << ilog2(newmap->vls)) - 1;
1869 for (i = 0; i < newmap->vls; i++) {
1870 /* save for wrap around */
1871 int first_scontext = scontext;
1872
1873 if (i < newmap->actual_vls) {
1874 int sz = roundup_pow_of_two(vl_scontexts[i]);
1875
1876 /* only allocate once */
1877 newmap->map[i] = kzalloc(sizeof(*newmap->map[i]) +
1878 sz * sizeof(struct
1879 send_context *),
1880 GFP_KERNEL);
1881 if (!newmap->map[i])
1882 goto bail;
1883 newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
1884 /*
1885 * assign send contexts and
1886 * adjust credit return threshold
1887 */
1888 for (j = 0; j < sz; j++) {
1889 if (dd->kernel_send_context[scontext]) {
1890 newmap->map[i]->ksc[j] =
1891 dd->kernel_send_context[scontext];
1892 set_threshold(dd, scontext, i);
1893 }
1894 if (++scontext >= first_scontext +
1895 vl_scontexts[i])
1896 /* wrap back to first send context */
1897 scontext = first_scontext;
1898 }
1899 } else {
1900 /* just re-use entry without allocating */
1901 newmap->map[i] = newmap->map[i % num_vls];
1902 }
1903 scontext = first_scontext + vl_scontexts[i];
1904 }
1905 /* newmap in hand, save old map */
1906 spin_lock_irq(&dd->pio_map_lock);
1907 oldmap = rcu_dereference_protected(dd->pio_map,
1908 lockdep_is_held(&dd->pio_map_lock));
1909
1910 /* publish newmap */
1911 rcu_assign_pointer(dd->pio_map, newmap);
1912
1913 spin_unlock_irq(&dd->pio_map_lock);
1914 /* success, free any old map after grace period */
1915 if (oldmap)
1916 call_rcu(&oldmap->list, pio_map_rcu_callback);
1917 return 0;
1918 bail:
1919 /* free any partial allocation */
1920 pio_map_free(newmap);
1921 return -ENOMEM;
1922 }
1923
1924 void free_pio_map(struct hfi1_devdata *dd)
1925 {
1926 /* Free PIO map if allocated */
1927 if (rcu_access_pointer(dd->pio_map)) {
1928 spin_lock_irq(&dd->pio_map_lock);
1929 pio_map_free(rcu_access_pointer(dd->pio_map));
1930 RCU_INIT_POINTER(dd->pio_map, NULL);
1931 spin_unlock_irq(&dd->pio_map_lock);
1932 synchronize_rcu();
1933 }
1934 kfree(dd->kernel_send_context);
1935 dd->kernel_send_context = NULL;
1936 }
1937
1938 int init_pervl_scs(struct hfi1_devdata *dd)
1939 {
1940 int i;
1941 u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */
1942 u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */
1943 u32 ctxt;
1944 struct hfi1_pportdata *ppd = dd->pport;
1945
1946 dd->vld[15].sc = sc_alloc(dd, SC_VL15,
1947 dd->rcd[0]->rcvhdrqentsize, dd->node);
1948 if (!dd->vld[15].sc)
1949 return -ENOMEM;
1950
1951 hfi1_init_ctxt(dd->vld[15].sc);
1952 dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
1953
1954 dd->kernel_send_context = kzalloc_node(dd->num_send_contexts *
1955 sizeof(struct send_context *),
1956 GFP_KERNEL, dd->node);
1957 if (!dd->kernel_send_context)
1958 goto freesc15;
1959
1960 dd->kernel_send_context[0] = dd->vld[15].sc;
1961
1962 for (i = 0; i < num_vls; i++) {
1963 /*
1964 * Since this function does not deal with a specific
1965 * receive context but we need the RcvHdrQ entry size,
1966 * use the size from rcd[0]. It is guaranteed to be
1967 * valid at this point and will remain the same for all
1968 * receive contexts.
1969 */
1970 dd->vld[i].sc = sc_alloc(dd, SC_KERNEL,
1971 dd->rcd[0]->rcvhdrqentsize, dd->node);
1972 if (!dd->vld[i].sc)
1973 goto nomem;
1974 dd->kernel_send_context[i + 1] = dd->vld[i].sc;
1975 hfi1_init_ctxt(dd->vld[i].sc);
1976 /* non VL15 start with the max MTU */
1977 dd->vld[i].mtu = hfi1_max_mtu;
1978 }
1979 for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
1980 dd->kernel_send_context[i + 1] =
1981 sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node);
1982 if (!dd->kernel_send_context[i + 1])
1983 goto nomem;
1984 hfi1_init_ctxt(dd->kernel_send_context[i + 1]);
1985 }
1986
1987 sc_enable(dd->vld[15].sc);
1988 ctxt = dd->vld[15].sc->hw_context;
1989 mask = all_vl_mask & ~(1LL << 15);
1990 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
1991 dd_dev_info(dd,
1992 "Using send context %u(%u) for VL15\n",
1993 dd->vld[15].sc->sw_index, ctxt);
1994
1995 for (i = 0; i < num_vls; i++) {
1996 sc_enable(dd->vld[i].sc);
1997 ctxt = dd->vld[i].sc->hw_context;
1998 mask = all_vl_mask & ~(data_vls_mask);
1999 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
2000 }
2001 for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
2002 sc_enable(dd->kernel_send_context[i + 1]);
2003 ctxt = dd->kernel_send_context[i + 1]->hw_context;
2004 mask = all_vl_mask & ~(data_vls_mask);
2005 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
2006 }
2007
2008 if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
2009 goto nomem;
2010 return 0;
2011
2012 nomem:
2013 for (i = 0; i < num_vls; i++) {
2014 sc_free(dd->vld[i].sc);
2015 dd->vld[i].sc = NULL;
2016 }
2017
2018 for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++)
2019 sc_free(dd->kernel_send_context[i + 1]);
2020
2021 kfree(dd->kernel_send_context);
2022 dd->kernel_send_context = NULL;
2023
2024 freesc15:
2025 sc_free(dd->vld[15].sc);
2026 return -ENOMEM;
2027 }
2028
2029 int init_credit_return(struct hfi1_devdata *dd)
2030 {
2031 int ret;
2032 int i;
2033
2034 dd->cr_base = kcalloc(
2035 node_affinity.num_possible_nodes,
2036 sizeof(struct credit_return_base),
2037 GFP_KERNEL);
2038 if (!dd->cr_base) {
2039 ret = -ENOMEM;
2040 goto done;
2041 }
2042 for_each_node_with_cpus(i) {
2043 int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
2044
2045 set_dev_node(&dd->pcidev->dev, i);
2046 dd->cr_base[i].va = dma_zalloc_coherent(
2047 &dd->pcidev->dev,
2048 bytes,
2049 &dd->cr_base[i].dma,
2050 GFP_KERNEL);
2051 if (!dd->cr_base[i].va) {
2052 set_dev_node(&dd->pcidev->dev, dd->node);
2053 dd_dev_err(dd,
2054 "Unable to allocate credit return DMA range for NUMA %d\n",
2055 i);
2056 ret = -ENOMEM;
2057 goto done;
2058 }
2059 }
2060 set_dev_node(&dd->pcidev->dev, dd->node);
2061
2062 ret = 0;
2063 done:
2064 return ret;
2065 }
2066
2067 void free_credit_return(struct hfi1_devdata *dd)
2068 {
2069 int i;
2070
2071 if (!dd->cr_base)
2072 return;
2073 for (i = 0; i < node_affinity.num_possible_nodes; i++) {
2074 if (dd->cr_base[i].va) {
2075 dma_free_coherent(&dd->pcidev->dev,
2076 TXE_NUM_CONTEXTS *
2077 sizeof(struct credit_return),
2078 dd->cr_base[i].va,
2079 dd->cr_base[i].dma);
2080 }
2081 }
2082 kfree(dd->cr_base);
2083 dd->cr_base = NULL;
2084 }