arch/tile/gxio/mpipe.c
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

/*
 * Implementation of mpipe gxio calls.
 */

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>

#include <gxio/iorpc_globals.h>
#include <gxio/iorpc_mpipe.h>
#include <gxio/iorpc_mpipe_info.h>
#include <gxio/kiorpc.h>
#include <gxio/mpipe.h>

/* HACK: Avoid pointless "shadow" warnings. */
#define link link_shadow

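/*
 * Initialize an mPIPE context: open the "mpipe/N/iorpc" hypervisor
 * device, map the config and fast MMIO regions, and mark all eight
 * buffer-stack slots as unused (255).
 *
 * A minimal calling sketch (instance 0 assumed purely for illustration):
 *
 *	gxio_mpipe_context_t ctx;
 *	if (gxio_mpipe_init(&ctx, 0) != 0)
 *		return -ENODEV;
 *
 * Returns 0 on success, or a GXIO/-ENODEV error code on failure.
 */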
int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
{
        char file[32];

        int fd;
        int i;

        snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index);
        fd = hv_dev_open((HV_VirtAddr) file, 0);
        if (fd < 0) {
                if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
                        return fd;
                else
                        return -ENODEV;
        }

        context->fd = fd;

        /* Map in the MMIO space. */
        context->mmio_cfg_base = (void __force *)
                iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET,
                              HV_MPIPE_CONFIG_MMIO_SIZE);
        if (context->mmio_cfg_base == NULL)
                goto cfg_failed;

        context->mmio_fast_base = (void __force *)
                iorpc_ioremap(fd, HV_MPIPE_FAST_MMIO_OFFSET,
                              HV_MPIPE_FAST_MMIO_SIZE);
        if (context->mmio_fast_base == NULL)
                goto fast_failed;

        /* Initialize the stacks. */
        for (i = 0; i < 8; i++)
                context->__stacks.stacks[i] = 255;

        return 0;

fast_failed:
        iounmap((void __force __iomem *)(context->mmio_cfg_base));
cfg_failed:
        hv_dev_close(context->fd);
        return -ENODEV;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_init);

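/*
 * Tear down a context created by gxio_mpipe_init(): unmap both MMIO
 * regions and close the hypervisor file descriptor.
 */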
int gxio_mpipe_destroy(gxio_mpipe_context_t *context)
{
        iounmap((void __force __iomem *)(context->mmio_cfg_base));
        iounmap((void __force __iomem *)(context->mmio_fast_base));
        return hv_dev_close(context->fd);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_destroy);

static int16_t gxio_mpipe_buffer_sizes[8] =
        { 128, 256, 512, 1024, 1664, 4096, 10368, 16384 };

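/*
 * Map a byte count to the smallest hardware buffer-size enum that can
 * hold it; anything larger than 10368 bytes falls through to the
 * 16384-byte class. For example, a 1500-byte frame maps to enum 4
 * (1664 bytes), and gxio_mpipe_buffer_size_enum_to_buffer_size()
 * performs the reverse lookup.
 */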
gxio_mpipe_buffer_size_enum_t gxio_mpipe_buffer_size_to_buffer_size_enum(size_t
                                                                         size)
{
        int i;
        for (i = 0; i < 7; i++)
                if (size <= gxio_mpipe_buffer_sizes[i])
                        break;
        return i;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_to_buffer_size_enum);

size_t gxio_mpipe_buffer_size_enum_to_buffer_size(gxio_mpipe_buffer_size_enum_t
                                                  buffer_size_enum)
{
        if (buffer_size_enum > 7)
                buffer_size_enum = 7;

        return gxio_mpipe_buffer_sizes[buffer_size_enum];
}

EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_enum_to_buffer_size);

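/*
 * Each L2 cache line of a buffer stack holds 12 buffer handles, so the
 * stack needs ceil(buffers / 12) lines. For example, with a 64-byte L2
 * line (the TILE-Gx value of CHIP_L2_LINE_SIZE()), 1000 buffers would
 * need ceil(1000 / 12) = 84 lines, i.e. 5376 bytes.
 */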
size_t gxio_mpipe_calc_buffer_stack_bytes(unsigned long buffers)
{
        const int BUFFERS_PER_LINE = 12;

        /* Count the number of cache lines. */
        unsigned long lines =
                (buffers + BUFFERS_PER_LINE - 1) / BUFFERS_PER_LINE;

        /* Convert to bytes. */
        return lines * CHIP_L2_LINE_SIZE();
}

EXPORT_SYMBOL_GPL(gxio_mpipe_calc_buffer_stack_bytes);

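/*
 * Register "mem" with the hardware as buffer stack "stack" for buffers
 * of the given size class, then remember which stack backs that size
 * class so later classifier rules can default to it.
 */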
int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context,
                                 unsigned int stack,
                                 gxio_mpipe_buffer_size_enum_t buffer_size_enum,
                                 void *mem, size_t mem_size,
                                 unsigned int mem_flags)
{
        int result;

        memset(mem, 0, mem_size);

        result = gxio_mpipe_init_buffer_stack_aux(context, mem, mem_size,
                                                  mem_flags, stack,
                                                  buffer_size_enum);
        if (result < 0)
                return result;

        /* Save the stack. */
        context->__stacks.stacks[buffer_size_enum] = stack;

        return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_init_buffer_stack);

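/* Thin wrapper: hand "mem" to the hardware as NotifRing "ring". */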
int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context,
                               unsigned int ring,
                               void *mem, size_t mem_size,
                               unsigned int mem_flags)
{
        return gxio_mpipe_init_notif_ring_aux(context, mem, mem_size,
                                              mem_flags, ring);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_ring);

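/*
 * Bind "num_rings" consecutive NotifRings (starting at "ring") to a
 * notification group, then point "num_buckets" consecutive buckets at
 * those rings in round-robin order using the requested bucket mode.
 */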
int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t *context,
                                            unsigned int group,
                                            unsigned int ring,
                                            unsigned int num_rings,
                                            unsigned int bucket,
                                            unsigned int num_buckets,
                                            gxio_mpipe_bucket_mode_t mode)
{
        int i;
        int result;

        gxio_mpipe_bucket_info_t bucket_info = { {
                .group = group,
                .mode = mode,
        } };

        gxio_mpipe_notif_group_bits_t bits = { {0} };

        for (i = 0; i < num_rings; i++)
                gxio_mpipe_notif_group_add_ring(&bits, ring + i);

        result = gxio_mpipe_init_notif_group(context, group, bits);
        if (result != 0)
                return result;

        for (i = 0; i < num_buckets; i++) {
                bucket_info.notifring = ring + (i % num_rings);

                result = gxio_mpipe_init_bucket(context, bucket + i,
                                                bucket_info);
                if (result != 0)
                        return result;
        }

        return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_group_and_buckets);

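/*
 * Zero "mem" and register it with the hardware as eDMA ring "ring",
 * bound to the given egress channel.
 */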
int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
                              unsigned int ring, unsigned int channel,
                              void *mem, size_t mem_size,
                              unsigned int mem_flags)
{
        memset(mem, 0, mem_size);

        return gxio_mpipe_init_edma_ring_aux(context, mem, mem_size, mem_flags,
                                             ring, channel);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_init_edma_ring);

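/* Start building an (initially empty) classifier rule list for "context". */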
void gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules,
                           gxio_mpipe_context_t *context)
{
        rules->context = context;
        memset(&rules->list, 0, sizeof(rules->list));
}

EXPORT_SYMBOL_GPL(gxio_mpipe_rules_init);

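/*
 * Open a new rule in the list: pad the previous rule so the new one is
 * properly aligned, fill in defaults (2 bytes of headroom, 16384-byte
 * capacity), record the bucket range ("num_buckets" must be a power of
 * two), and resolve the per-size buffer stacks, falling back to the
 * stacks registered with the context when "stacks" is NULL.
 */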
int gxio_mpipe_rules_begin(gxio_mpipe_rules_t *rules,
                           unsigned int bucket, unsigned int num_buckets,
                           gxio_mpipe_rules_stacks_t *stacks)
{
        int i;
        int stack = 255;

        gxio_mpipe_rules_list_t *list = &rules->list;

        /* Current rule. */
        gxio_mpipe_rules_rule_t *rule =
                (gxio_mpipe_rules_rule_t *) (list->rules + list->head);

        unsigned int head = list->tail;

        /*
         * Align next rule properly.
         * Note that "dmacs_and_vlans" will also be aligned.
         */
        unsigned int pad = 0;
        while (((head + pad) % __alignof__(gxio_mpipe_rules_rule_t)) != 0)
                pad++;

        /*
         * Verify room.
         * ISSUE: Mark rules as broken on error?
         */
        if (head + pad + sizeof(*rule) >= sizeof(list->rules))
                return GXIO_MPIPE_ERR_RULES_FULL;

        /* Verify num_buckets is a power of 2. */
        if (__builtin_popcount(num_buckets) != 1)
                return GXIO_MPIPE_ERR_RULES_INVALID;

        /* Add padding to previous rule. */
        rule->size += pad;

        /* Start a new rule. */
        list->head = head + pad;

        rule = (gxio_mpipe_rules_rule_t *) (list->rules + list->head);

        /* Default some values. */
        rule->headroom = 2;
        rule->tailroom = 0;
        rule->capacity = 16384;

        /* Save the bucket info. */
        rule->bucket_mask = num_buckets - 1;
        rule->bucket_first = bucket;

        for (i = 8 - 1; i >= 0; i--) {
                int maybe = stacks ? stacks->stacks[i] :
                        rules->context->__stacks.stacks[i];
                if (maybe != 255)
                        stack = maybe;
                rule->stacks.stacks[i] = stack;
        }

        if (stack == 255)
                return GXIO_MPIPE_ERR_RULES_INVALID;

        /* NOTE: Only entries at the end of the array can be 255. */
        for (i = 8 - 1; i > 0; i--) {
                if (rule->stacks.stacks[i] == 255) {
                        rule->stacks.stacks[i] = stack;
                        rule->capacity =
                                gxio_mpipe_buffer_size_enum_to_buffer_size(i - 1);
                }
        }

        rule->size = sizeof(*rule);
        list->tail = list->head + rule->size;

        return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_rules_begin);

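/* Add an input channel (0..31) to the rule currently being built. */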
int gxio_mpipe_rules_add_channel(gxio_mpipe_rules_t *rules,
                                 unsigned int channel)
{
        gxio_mpipe_rules_list_t *list = &rules->list;

        gxio_mpipe_rules_rule_t *rule =
                (gxio_mpipe_rules_rule_t *) (list->rules + list->head);

        /* Verify channel. */
        if (channel >= 32)
                return GXIO_MPIPE_ERR_RULES_INVALID;

        /* Verify begun. */
        if (list->tail == 0)
                return GXIO_MPIPE_ERR_RULES_EMPTY;

        rule->channel_bits |= (1UL << channel);

        return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_rules_add_channel);

int gxio_mpipe_rules_set_headroom(gxio_mpipe_rules_t *rules, uint8_t headroom)
{
        gxio_mpipe_rules_list_t *list = &rules->list;

        gxio_mpipe_rules_rule_t *rule =
                (gxio_mpipe_rules_rule_t *) (list->rules + list->head);

        /* Verify begun. */
        if (list->tail == 0)
                return GXIO_MPIPE_ERR_RULES_EMPTY;

        rule->headroom = headroom;

        return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_rules_set_headroom);

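/*
 * Push the accumulated rule list to the hardware classifier. A rough
 * sketch of the whole sequence (a single bucket and channel 0 are
 * assumed purely for illustration):
 *
 *	gxio_mpipe_rules_t rules;
 *	gxio_mpipe_rules_init(&rules, context);
 *	gxio_mpipe_rules_begin(&rules, bucket, 1, NULL);
 *	gxio_mpipe_rules_add_channel(&rules, 0);
 *	gxio_mpipe_rules_commit(&rules);
 */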
int gxio_mpipe_rules_commit(gxio_mpipe_rules_t *rules)
{
        gxio_mpipe_rules_list_t *list = &rules->list;
        unsigned int size =
                offsetof(gxio_mpipe_rules_list_t, rules) + list->tail;
        return gxio_mpipe_commit_rules(rules->context, list, size);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_rules_commit);

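/*
 * Wrap a NotifRing in an iqueue: record the ring geometry (the mask and
 * log2 fields assume a power-of-two entry count), write the initial
 * "tail" into the ring memory, and register the memory with the
 * hardware.
 */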
int gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue,
                           gxio_mpipe_context_t *context,
                           unsigned int ring,
                           void *mem, size_t mem_size, unsigned int mem_flags)
{
        /* The init call below will verify that "mem_size" is legal. */
        unsigned int num_entries = mem_size / sizeof(gxio_mpipe_idesc_t);

        iqueue->context = context;
        iqueue->idescs = (gxio_mpipe_idesc_t *)mem;
        iqueue->ring = ring;
        iqueue->num_entries = num_entries;
        iqueue->mask_num_entries = num_entries - 1;
        iqueue->log2_num_entries = __builtin_ctz(num_entries);
        iqueue->head = 1;
#ifdef __BIG_ENDIAN__
        iqueue->swapped = 0;
#endif

        /* Initialize the "tail". */
        __gxio_mmio_write(mem, iqueue->head);

        return gxio_mpipe_init_notif_ring(context, ring, mem, mem_size,
                                          mem_flags);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init);

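/*
 * Wrap an eDMA ring in an equeue: register the ring memory with the
 * hardware, then point the DMA queue at the eDMA "post" MMIO region
 * used to read back the number of completed commands.
 */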
int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
                           gxio_mpipe_context_t *context,
                           unsigned int edma_ring_id,
                           unsigned int channel,
                           void *mem, unsigned int mem_size,
                           unsigned int mem_flags)
{
        /* The init call below will verify that "mem_size" is legal. */
        unsigned int num_entries = mem_size / sizeof(gxio_mpipe_edesc_t);

        /* Offset used to read number of completed commands. */
        MPIPE_EDMA_POST_REGION_ADDR_t offset;

        int result = gxio_mpipe_init_edma_ring(context, edma_ring_id, channel,
                                               mem, mem_size, mem_flags);
        if (result < 0)
                return result;

        memset(equeue, 0, sizeof(*equeue));

        offset.word = 0;
        offset.region =
                MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
                MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
        offset.ring = edma_ring_id;

        __gxio_dma_queue_init(&equeue->dma_queue,
                              context->mmio_fast_base + offset.word,
                              num_entries);
        equeue->edescs = mem;
        equeue->mask_num_entries = num_entries - 1;
        equeue->log2_num_entries = __builtin_ctz(num_entries);

        return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_equeue_init);

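/*
 * Timestamp helpers. Setting a timestamp passes the wall-clock time
 * along with the current cycle count; reading one converts the cycles
 * that elapse between issuing the request and the cycle count reported
 * back into nanoseconds and subtracts them, to roughly compensate for
 * the hypervisor round trip.
 */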
int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
                             const struct timespec *ts)
{
        cycles_t cycles = get_cycles();
        return gxio_mpipe_set_timestamp_aux(context, (uint64_t)ts->tv_sec,
                                            (uint64_t)ts->tv_nsec,
                                            (uint64_t)cycles);
}

int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
                             struct timespec *ts)
{
        int ret;
        cycles_t cycles_prev, cycles_now, clock_rate;
        cycles_prev = get_cycles();
        ret = gxio_mpipe_get_timestamp_aux(context, (uint64_t *)&ts->tv_sec,
                                           (uint64_t *)&ts->tv_nsec,
                                           (uint64_t *)&cycles_now);
        if (ret < 0)
                return ret;

        clock_rate = get_clock_rate();
        ts->tv_nsec -= (cycles_now - cycles_prev) * 1000000000LL / clock_rate;
        if (ts->tv_nsec < 0) {
                ts->tv_nsec += 1000000000LL;
                ts->tv_sec -= 1;
        }
        return ret;
}

int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta)
{
        return gxio_mpipe_adjust_timestamp_aux(context, delta);
}

/* Get our internal context used for link name access. This context is
 * special in that it is not associated with an mPIPE service domain.
 */
static gxio_mpipe_context_t *_gxio_get_link_context(void)
{
        static gxio_mpipe_context_t context;
        static gxio_mpipe_context_t *contextp;
        static int tried_open = 0;
        static DEFINE_MUTEX(mutex);

        mutex_lock(&mutex);

        if (!tried_open) {
                int i = 0;
                tried_open = 1;

                /*
                 * "4" here is the maximum possible number of mPIPE shims; it's
                 * an exaggeration but we shouldn't ever go beyond 2 anyway.
                 */
                for (i = 0; i < 4; i++) {
                        char file[80];

                        snprintf(file, sizeof(file), "mpipe/%d/iorpc_info", i);
                        context.fd = hv_dev_open((HV_VirtAddr) file, 0);
                        if (context.fd < 0)
                                continue;

                        contextp = &context;
                        break;
                }
        }

        mutex_unlock(&mutex);

        return contextp;
}

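/*
 * Look up the name and MAC address of the idx-th link known to the
 * hypervisor, using the shared "info" context above (which is not tied
 * to an mPIPE service domain).
 */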
int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
{
        int rv;
        _gxio_mpipe_link_name_t name;
        _gxio_mpipe_link_mac_t mac;

        gxio_mpipe_context_t *context = _gxio_get_link_context();
        if (!context)
                return GXIO_ERR_NO_DEVICE;

        rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
        if (rv >= 0) {
                strncpy(link_name, name.name, sizeof(name.name));
                memcpy(link_mac, mac.mac, sizeof(mac.mac));
        }

        return rv;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_link_enumerate_mac);

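/*
 * Open the named link and record the channel and MAC index that the
 * hypervisor assigns (packed into the high and low bits of the return
 * value, respectively).
 */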
int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
                         gxio_mpipe_context_t *context, const char *link_name,
                         unsigned int flags)
{
        _gxio_mpipe_link_name_t name;
        int rv;

        strncpy(name.name, link_name, sizeof(name.name));
        name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';

        rv = gxio_mpipe_link_open_aux(context, name, flags);
        if (rv < 0)
                return rv;

        link->context = context;
        link->channel = rv >> 8;
        link->mac = rv & 0xFF;

        return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_link_open);

int gxio_mpipe_link_close(gxio_mpipe_link_t *link)
{
        return gxio_mpipe_link_close_aux(link->context, link->mac);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_link_close);