/*
 * core.c - Implementation of core module of MOST Linux driver stack
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This file is licensed under GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/sysfs.h>
#include <linux/kthread.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include "mostcore.h"

#define MAX_CHANNELS	64
#define STRING_SIZE	80

static struct class *most_class;
static struct device *class_glue_dir;
static struct ida mdev_id;
static int modref;

struct most_c_obj {
	struct kobject kobj;
	struct completion cleanup;
	atomic_t mbo_ref;
	atomic_t mbo_nq_level;
	u16 channel_id;
	bool is_poisoned;
	bool is_started;
	int is_starving;
	struct most_interface *iface;
	struct most_inst_obj *inst;
	struct most_channel_config cfg;
	bool keep_mbo;
	bool enqueue_halt;
	struct list_head fifo;
	spinlock_t fifo_lock;
	struct list_head halt_fifo;
	struct list_head list;
	struct most_aim *first_aim;
	struct most_aim *second_aim;
	struct list_head trash_fifo;
	struct task_struct *hdm_enqueue_task;
	struct mutex stop_task_mutex;
	wait_queue_head_t hdm_fifo_wq;
};
#define to_c_obj(d) container_of(d, struct most_c_obj, kobj)

struct most_inst_obj {
	int dev_id;
	atomic_t tainted;
	struct most_interface *iface;
	struct list_head channel_list;
	struct most_c_obj *channel[MAX_CHANNELS];
	struct kobject kobj;
	struct list_head list;
};
#define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)

/**
 * list_pop_mbo - retrieves the first MBO of the list and removes it
 * @ptr: the list head to grab the MBO from.
 */
#define list_pop_mbo(ptr)						\
({									\
	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
	list_del(&_mbo->list);						\
	_mbo;								\
})
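
/*
 * Usage sketch (illustrative, not part of the driver): list_pop_mbo()
 * itself takes no lock, so callers serialize on the channel's fifo_lock,
 * as get_hdm_mbo() and most_get_mbo() below do:
 *
 *	spin_lock_irqsave(&c->fifo_lock, flags);
 *	if (!list_empty(&c->fifo))
 *		mbo = list_pop_mbo(&c->fifo);
 *	spin_unlock_irqrestore(&c->fifo_lock, flags);
 */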

static struct mutex deregister_mutex;

/* ___ ___
 * ___C H A N N E L___
 */

/**
 * struct most_c_attr - to access the attributes of a channel object
 * @attr: attributes of a channel
 * @show: pointer to the show function
 * @store: pointer to the store function
 */
struct most_c_attr {
	struct attribute attr;
	ssize_t (*show)(struct most_c_obj *d,
			struct most_c_attr *attr,
			char *buf);
	ssize_t (*store)(struct most_c_obj *d,
			 struct most_c_attr *attr,
			 const char *buf,
			 size_t count);
};
#define to_channel_attr(a) container_of(a, struct most_c_attr, attr)

#define MOST_CHNL_ATTR(_name, _mode, _show, _store) \
	struct most_c_attr most_chnl_attr_##_name = \
		__ATTR(_name, _mode, _show, _store)
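
/*
 * For illustration, create_channel_attribute(set_buffer_size) below
 * expands via this macro to:
 *
 *	static struct most_c_attr most_chnl_attr_set_buffer_size =
 *		__ATTR(set_buffer_size, S_IRUGO | S_IWUSR,
 *		       show_set_buffer_size, store_set_buffer_size);
 *
 * i.e. one struct most_c_attr per sysfs file of a channel.
 */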

/**
 * channel_attr_show - show function of channel object
 * @kobj: pointer to its kobject
 * @attr: pointer to its attributes
 * @buf: buffer
 */
static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
				 char *buf)
{
	struct most_c_attr *channel_attr = to_channel_attr(attr);
	struct most_c_obj *c_obj = to_c_obj(kobj);

	if (!channel_attr->show)
		return -EIO;

	return channel_attr->show(c_obj, channel_attr, buf);
}

/**
 * channel_attr_store - store function of channel object
 * @kobj: pointer to its kobject
 * @attr: pointer to its attributes
 * @buf: buffer
 * @len: length of buffer
 */
static ssize_t channel_attr_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct most_c_attr *channel_attr = to_channel_attr(attr);
	struct most_c_obj *c_obj = to_c_obj(kobj);

	if (!channel_attr->store)
		return -EIO;
	return channel_attr->store(c_obj, channel_attr, buf, len);
}

static const struct sysfs_ops most_channel_sysfs_ops = {
	.show = channel_attr_show,
	.store = channel_attr_store,
};

/**
 * most_free_mbo_coherent - free an MBO and its coherent buffer
 * @mbo: buffer to be released
 */
static void most_free_mbo_coherent(struct mbo *mbo)
{
	struct most_c_obj *c = mbo->context;
	u32 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	/* size must match the u32 sum computed in arm_mbo_chain() */
	dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
			  mbo->bus_address);
	kfree(mbo);
	if (atomic_sub_and_test(1, &c->mbo_ref))
		complete(&c->cleanup);
}

/**
 * flush_channel_fifos - clear the channel fifos
 * @c: pointer to channel object
 */
static void flush_channel_fifos(struct most_c_obj *c)
{
	unsigned long flags, hf_flags;
	struct mbo *mbo, *tmp;

	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
		return;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
		list_del(&mbo->list);
		/* drop the lock while freeing; dma_free_coherent() may sleep */
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	spin_lock_irqsave(&c->fifo_lock, hf_flags);
	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, hf_flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);

	if (unlikely(!list_empty(&c->fifo) || !list_empty(&c->halt_fifo)))
		pr_info("WARN: fifo | halt fifo not empty\n");
}

/**
 * flush_trash_fifo - clear the trash fifo
 * @c: pointer to channel object
 */
static int flush_trash_fifo(struct most_c_obj *c)
{
	struct mbo *mbo, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return 0;
}

/**
 * most_channel_release - release function of channel object
 * @kobj: pointer to channel's kobject
 */
static void most_channel_release(struct kobject *kobj)
{
	struct most_c_obj *c = to_c_obj(kobj);

	kfree(c);
}

static ssize_t show_available_directions(struct most_c_obj *c,
					 struct most_c_attr *attr,
					 char *buf)
{
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].direction & MOST_CH_RX)
		strcat(buf, "dir_rx ");
	if (c->iface->channel_vector[i].direction & MOST_CH_TX)
		strcat(buf, "dir_tx ");
	strcat(buf, "\n");
	return strlen(buf); /* sysfs length excludes the terminating NUL */
}

static ssize_t show_available_datatypes(struct most_c_obj *c,
					struct most_c_attr *attr,
					char *buf)
{
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
		strcat(buf, "control ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
		strcat(buf, "async ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
		strcat(buf, "sync ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC_AVP)
		strcat(buf, "isoc_avp ");
	strcat(buf, "\n");
	return strlen(buf);
}

static
ssize_t show_number_of_packet_buffers(struct most_c_obj *c,
				      struct most_c_attr *attr,
				      char *buf)
{
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_packet);
}

static
ssize_t show_number_of_stream_buffers(struct most_c_obj *c,
				      struct most_c_attr *attr,
				      char *buf)
{
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_streaming);
}

static
ssize_t show_size_of_packet_buffer(struct most_c_obj *c,
				   struct most_c_attr *attr,
				   char *buf)
{
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_packet);
}

static
ssize_t show_size_of_stream_buffer(struct most_c_obj *c,
				   struct most_c_attr *attr,
				   char *buf)
{
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_streaming);
}

static ssize_t show_channel_starving(struct most_c_obj *c,
				     struct most_c_attr *attr,
				     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
}

#define create_show_channel_attribute(val) \
	static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL)

create_show_channel_attribute(available_directions);
create_show_channel_attribute(available_datatypes);
create_show_channel_attribute(number_of_packet_buffers);
create_show_channel_attribute(number_of_stream_buffers);
create_show_channel_attribute(size_of_stream_buffer);
create_show_channel_attribute(size_of_packet_buffer);
create_show_channel_attribute(channel_starving);

static ssize_t show_set_number_of_buffers(struct most_c_obj *c,
					  struct most_c_attr *attr,
					  char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
}

static ssize_t store_set_number_of_buffers(struct most_c_obj *c,
					   struct most_c_attr *attr,
					   const char *buf,
					   size_t count)
{
	int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);

	if (ret)
		return ret;
	return count;
}

static ssize_t show_set_buffer_size(struct most_c_obj *c,
				    struct most_c_attr *attr,
				    char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
}

static ssize_t store_set_buffer_size(struct most_c_obj *c,
				     struct most_c_attr *attr,
				     const char *buf,
				     size_t count)
{
	int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);

	if (ret)
		return ret;
	return count;
}

static ssize_t show_set_direction(struct most_c_obj *c,
				  struct most_c_attr *attr,
				  char *buf)
{
	if (c->cfg.direction & MOST_CH_TX)
		return snprintf(buf, PAGE_SIZE, "dir_tx\n");
	else if (c->cfg.direction & MOST_CH_RX)
		return snprintf(buf, PAGE_SIZE, "dir_rx\n");
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}

static ssize_t store_set_direction(struct most_c_obj *c,
				   struct most_c_attr *attr,
				   const char *buf,
				   size_t count)
{
	if (!strcmp(buf, "dir_rx\n")) {
		c->cfg.direction = MOST_CH_RX;
	} else if (!strcmp(buf, "dir_tx\n")) {
		c->cfg.direction = MOST_CH_TX;
	} else {
		pr_info("WARN: invalid attribute settings\n");
		return -EINVAL;
	}
	return count;
}

static ssize_t show_set_datatype(struct most_c_obj *c,
				 struct most_c_attr *attr,
				 char *buf)
{
	if (c->cfg.data_type & MOST_CH_CONTROL)
		return snprintf(buf, PAGE_SIZE, "control\n");
	else if (c->cfg.data_type & MOST_CH_ASYNC)
		return snprintf(buf, PAGE_SIZE, "async\n");
	else if (c->cfg.data_type & MOST_CH_SYNC)
		return snprintf(buf, PAGE_SIZE, "sync\n");
	else if (c->cfg.data_type & MOST_CH_ISOC_AVP)
		return snprintf(buf, PAGE_SIZE, "isoc_avp\n");
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}

static ssize_t store_set_datatype(struct most_c_obj *c,
				  struct most_c_attr *attr,
				  const char *buf,
				  size_t count)
{
	if (!strcmp(buf, "control\n")) {
		c->cfg.data_type = MOST_CH_CONTROL;
	} else if (!strcmp(buf, "async\n")) {
		c->cfg.data_type = MOST_CH_ASYNC;
	} else if (!strcmp(buf, "sync\n")) {
		c->cfg.data_type = MOST_CH_SYNC;
	} else if (!strcmp(buf, "isoc_avp\n")) {
		c->cfg.data_type = MOST_CH_ISOC_AVP;
	} else {
		pr_info("WARN: invalid attribute settings\n");
		return -EINVAL;
	}
	return count;
}

static ssize_t show_set_subbuffer_size(struct most_c_obj *c,
				       struct most_c_attr *attr,
				       char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
}

static ssize_t store_set_subbuffer_size(struct most_c_obj *c,
					struct most_c_attr *attr,
					const char *buf,
					size_t count)
{
	int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);

	if (ret)
		return ret;
	return count;
}

static ssize_t show_set_packets_per_xact(struct most_c_obj *c,
					 struct most_c_attr *attr,
					 char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
}

static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
					  struct most_c_attr *attr,
					  const char *buf,
					  size_t count)
{
	int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);

	if (ret)
		return ret;
	return count;
}

#define create_channel_attribute(value) \
	static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \
			      show_##value, \
			      store_##value)

create_channel_attribute(set_buffer_size);
create_channel_attribute(set_number_of_buffers);
create_channel_attribute(set_direction);
create_channel_attribute(set_datatype);
create_channel_attribute(set_subbuffer_size);
create_channel_attribute(set_packets_per_xact);
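
/*
 * Example channel setup from user space (illustrative; the values are
 * arbitrary and must match what the HDM accepts, and the path is assumed
 * from the "devices" kset created in most_init() below):
 *
 *	cd /sys/devices/virtual/most/mostcore/devices/mdev0/ch0@ep_81
 *	echo -n 16 >set_number_of_buffers
 *	echo -n 512 >set_buffer_size
 *	echo -n -e "dir_rx\n" >set_direction
 *	echo -n -e "sync\n" >set_datatype
 */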

/**
 * most_channel_def_attrs - array of default attributes of channel object
 */
static struct attribute *most_channel_def_attrs[] = {
	&most_chnl_attr_available_directions.attr,
	&most_chnl_attr_available_datatypes.attr,
	&most_chnl_attr_number_of_packet_buffers.attr,
	&most_chnl_attr_number_of_stream_buffers.attr,
	&most_chnl_attr_size_of_packet_buffer.attr,
	&most_chnl_attr_size_of_stream_buffer.attr,
	&most_chnl_attr_set_number_of_buffers.attr,
	&most_chnl_attr_set_buffer_size.attr,
	&most_chnl_attr_set_direction.attr,
	&most_chnl_attr_set_datatype.attr,
	&most_chnl_attr_set_subbuffer_size.attr,
	&most_chnl_attr_set_packets_per_xact.attr,
	&most_chnl_attr_channel_starving.attr,
	NULL,
};

static struct kobj_type most_channel_ktype = {
	.sysfs_ops = &most_channel_sysfs_ops,
	.release = most_channel_release,
	.default_attrs = most_channel_def_attrs,
};

static struct kset *most_channel_kset;

/**
 * create_most_c_obj - allocates a channel object
 * @name: name of the channel object
 * @parent: parent kobject
 *
 * This creates a channel object and registers it with sysfs.
 * Returns a pointer to the object or NULL when something went wrong.
 */
static struct most_c_obj *
create_most_c_obj(const char *name, struct kobject *parent)
{
	struct most_c_obj *c;
	int retval;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return NULL;
	c->kobj.kset = most_channel_kset;
	retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
				      "%s", name);
	if (retval) {
		kobject_put(&c->kobj);
		return NULL;
	}
	kobject_uevent(&c->kobj, KOBJ_ADD);
	return c;
}

/**
 * destroy_most_c_obj - channel release function
 * @c: pointer to channel object
 *
 * This decrements the reference counter of the channel object.
 * If the reference count turns zero, its release function is called.
 */
static void destroy_most_c_obj(struct most_c_obj *c)
{
	if (c->first_aim)
		c->first_aim->disconnect_channel(c->iface, c->channel_id);
	if (c->second_aim)
		c->second_aim->disconnect_channel(c->iface, c->channel_id);
	c->first_aim = NULL;
	c->second_aim = NULL;

	mutex_lock(&deregister_mutex);
	flush_trash_fifo(c);
	flush_channel_fifos(c);
	mutex_unlock(&deregister_mutex);
	kobject_put(&c->kobj);
}

/* ___ ___
 * ___I N S T A N C E___
 */
#define MOST_INST_ATTR(_name, _mode, _show, _store) \
	struct most_inst_attribute most_inst_attr_##_name = \
		__ATTR(_name, _mode, _show, _store)

static struct list_head instance_list;

/**
 * struct most_inst_attribute - to access the attributes of instance object
 * @attr: attributes of an instance
 * @show: pointer to the show function
 * @store: pointer to the store function
 */
struct most_inst_attribute {
	struct attribute attr;
	ssize_t (*show)(struct most_inst_obj *d,
			struct most_inst_attribute *attr,
			char *buf);
	ssize_t (*store)(struct most_inst_obj *d,
			 struct most_inst_attribute *attr,
			 const char *buf,
			 size_t count);
};
#define to_instance_attr(a) \
	container_of(a, struct most_inst_attribute, attr)

/**
 * instance_attr_show - show function for an instance object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @buf: buffer
 */
static ssize_t instance_attr_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buf)
{
	struct most_inst_attribute *instance_attr;
	struct most_inst_obj *instance_obj;

	instance_attr = to_instance_attr(attr);
	instance_obj = to_inst_obj(kobj);

	if (!instance_attr->show)
		return -EIO;

	return instance_attr->show(instance_obj, instance_attr, buf);
}

/**
 * instance_attr_store - store function for an instance object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @buf: buffer
 * @len: length of buffer
 */
static ssize_t instance_attr_store(struct kobject *kobj,
				   struct attribute *attr,
				   const char *buf,
				   size_t len)
{
	struct most_inst_attribute *instance_attr;
	struct most_inst_obj *instance_obj;

	instance_attr = to_instance_attr(attr);
	instance_obj = to_inst_obj(kobj);

	if (!instance_attr->store)
		return -EIO;

	return instance_attr->store(instance_obj, instance_attr, buf, len);
}

static const struct sysfs_ops most_inst_sysfs_ops = {
	.show = instance_attr_show,
	.store = instance_attr_store,
};

/**
 * most_inst_release - release function for instance object
 * @kobj: pointer to instance's kobject
 *
 * This frees the allocated memory for the instance object
 */
static void most_inst_release(struct kobject *kobj)
{
	struct most_inst_obj *inst = to_inst_obj(kobj);

	kfree(inst);
}

static ssize_t show_description(struct most_inst_obj *instance_obj,
				struct most_inst_attribute *attr,
				char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n",
			instance_obj->iface->description);
}

static ssize_t show_interface(struct most_inst_obj *instance_obj,
			      struct most_inst_attribute *attr,
			      char *buf)
{
	switch (instance_obj->iface->interface) {
	case ITYPE_LOOPBACK:
		return snprintf(buf, PAGE_SIZE, "loopback\n");
	case ITYPE_I2C:
		return snprintf(buf, PAGE_SIZE, "i2c\n");
	case ITYPE_I2S:
		return snprintf(buf, PAGE_SIZE, "i2s\n");
	case ITYPE_TSI:
		return snprintf(buf, PAGE_SIZE, "tsi\n");
	case ITYPE_HBI:
		return snprintf(buf, PAGE_SIZE, "hbi\n");
	case ITYPE_MEDIALB_DIM:
		return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
	case ITYPE_MEDIALB_DIM2:
		return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
	case ITYPE_USB:
		return snprintf(buf, PAGE_SIZE, "usb\n");
	case ITYPE_PCIE:
		return snprintf(buf, PAGE_SIZE, "pcie\n");
	}
	return snprintf(buf, PAGE_SIZE, "unknown\n");
}

#define create_inst_attribute(value) \
	static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL)

create_inst_attribute(description);
create_inst_attribute(interface);

static struct attribute *most_inst_def_attrs[] = {
	&most_inst_attr_description.attr,
	&most_inst_attr_interface.attr,
	NULL,
};

static struct kobj_type most_inst_ktype = {
	.sysfs_ops = &most_inst_sysfs_ops,
	.release = most_inst_release,
	.default_attrs = most_inst_def_attrs,
};

static struct kset *most_inst_kset;

/**
 * create_most_inst_obj - creates an instance object
 * @name: name of the object to be created
 *
 * This allocates memory for an instance structure, assigns the proper kset
 * and registers it with sysfs.
 *
 * Returns a pointer to the instance object or NULL when something went wrong.
 */
static struct most_inst_obj *create_most_inst_obj(const char *name)
{
	struct most_inst_obj *inst;
	int retval;

	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
	if (!inst)
		return NULL;
	inst->kobj.kset = most_inst_kset;
	retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
				      "%s", name);
	if (retval) {
		kobject_put(&inst->kobj);
		return NULL;
	}
	kobject_uevent(&inst->kobj, KOBJ_ADD);
	return inst;
}

/**
 * destroy_most_inst_obj - MOST instance release function
 * @inst: pointer to the instance object
 *
 * This decrements the reference counter of the instance object.
 * If the reference count turns zero, its release function is called
 */
static void destroy_most_inst_obj(struct most_inst_obj *inst)
{
	struct most_c_obj *c, *tmp;

	/* need to destroy channels first, since
	 * each channel incremented the
	 * reference count of the inst->kobj
	 */
	list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
		destroy_most_c_obj(c);
	}
	kobject_put(&inst->kobj);
}

/* ___ ___
 * ___A I M___
 */
struct most_aim_obj {
	struct kobject kobj;
	struct list_head list;
	struct most_aim *driver;
	char add_link[STRING_SIZE];
	char remove_link[STRING_SIZE];
};
#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)

static struct list_head aim_list;

/**
 * struct most_aim_attribute - to access the attributes of AIM object
 * @attr: attributes of an AIM
 * @show: pointer to the show function
 * @store: pointer to the store function
 */
struct most_aim_attribute {
	struct attribute attr;
	ssize_t (*show)(struct most_aim_obj *d,
			struct most_aim_attribute *attr,
			char *buf);
	ssize_t (*store)(struct most_aim_obj *d,
			 struct most_aim_attribute *attr,
			 const char *buf,
			 size_t count);
};
#define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)

/**
 * aim_attr_show - show function of an AIM object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @buf: buffer
 */
static ssize_t aim_attr_show(struct kobject *kobj,
			     struct attribute *attr,
			     char *buf)
{
	struct most_aim_attribute *aim_attr;
	struct most_aim_obj *aim_obj;

	aim_attr = to_aim_attr(attr);
	aim_obj = to_aim_obj(kobj);

	if (!aim_attr->show)
		return -EIO;

	return aim_attr->show(aim_obj, aim_attr, buf);
}

/**
 * aim_attr_store - store function of an AIM object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @buf: buffer
 * @len: length of buffer
 */
static ssize_t aim_attr_store(struct kobject *kobj,
			      struct attribute *attr,
			      const char *buf,
			      size_t len)
{
	struct most_aim_attribute *aim_attr;
	struct most_aim_obj *aim_obj;

	aim_attr = to_aim_attr(attr);
	aim_obj = to_aim_obj(kobj);

	if (!aim_attr->store)
		return -EIO;
	return aim_attr->store(aim_obj, aim_attr, buf, len);
}

static const struct sysfs_ops most_aim_sysfs_ops = {
	.show = aim_attr_show,
	.store = aim_attr_store,
};

/**
 * most_aim_release - AIM release function
 * @kobj: pointer to AIM's kobject
 */
static void most_aim_release(struct kobject *kobj)
{
	struct most_aim_obj *aim_obj = to_aim_obj(kobj);

	kfree(aim_obj);
}

static ssize_t show_add_link(struct most_aim_obj *aim_obj,
			     struct most_aim_attribute *attr,
			     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->add_link);
}

/**
 * split_string - parses the string in buffer buf, modifying it in place,
 * and splits it into two mandatory and one optional substrings.
 *
 * @buf: complete string from attribute 'add_link'
 * @a: address of pointer to 1st substring (=instance name)
 * @b: address of pointer to 2nd substring (=channel name)
 * @c: optional address of pointer to 3rd substring (=user defined name)
 *
 * Examples:
 *
 * Input: "mdev0:ch0@ep_81:my_channel\n" or
 *        "mdev0:ch0@ep_81:my_channel"
 *
 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel"
 *
 * Input: "mdev0:ch0@ep_81\n"
 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> ""
 *
 * Input: "mdev0:ch0@ep_81"
 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL
 */
static int split_string(char *buf, char **a, char **b, char **c)
{
	*a = strsep(&buf, ":");
	if (!*a)
		return -EIO;

	*b = strsep(&buf, ":\n");
	if (!*b)
		return -EIO;

	if (c)
		*c = strsep(&buf, ":\n");

	return 0;
}
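
/*
 * Worked example (illustrative): strsep() modifies the buffer, which is
 * why the callers below always operate on a stack copy of the attribute
 * string:
 *
 *	char buffer[] = "mdev0:ch0@ep_81:my_channel";
 *	char *mdev, *mdev_ch, *mdev_devnod;
 *
 *	if (!split_string(buffer, &mdev, &mdev_ch, &mdev_devnod))
 *		pr_info("%s %s %s\n", mdev, mdev_ch, mdev_devnod);
 *
 * prints "mdev0 ch0@ep_81 my_channel".
 */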

/**
 * get_channel_by_name - get pointer to channel object
 * @mdev: name of the device instance
 * @mdev_ch: name of the respective channel
 *
 * This retrieves the pointer to a channel object.
 */
static struct most_c_obj *
get_channel_by_name(char *mdev, char *mdev_ch)
{
	struct most_c_obj *c, *tmp;
	struct most_inst_obj *i, *i_tmp;
	int found = 0;

	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
		if (!strcmp(kobject_name(&i->kobj), mdev)) {
			found++;
			break;
		}
	}
	if (unlikely(!found))
		return ERR_PTR(-EIO);

	list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
		if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
			found++;
			break;
		}
	}
	if (unlikely(found < 2))
		return ERR_PTR(-EIO);
	return c;
}

/**
 * store_add_link - store() function for add_link attribute
 * @aim_obj: pointer to AIM object
 * @attr: its attributes
 * @buf: buffer
 * @len: buffer length
 *
 * This parses the string given by buf and splits it into
 * three substrings. Note: the third substring is optional. In case a cdev
 * AIM is loaded, the optional 3rd substring will make up the name of the
 * device node in the /dev directory. If omitted, the device node will
 * inherit the channel's name within sysfs.
 *
 * Searches for a pair of device and channel and probes the AIM
 *
 * Example:
 * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link
 * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link
 *
 * (1) would create the device node /dev/my_rxchannel
 * (2) would create the device node /dev/mdev0-ch0@ep_81
 */
static ssize_t store_add_link(struct most_aim_obj *aim_obj,
			      struct most_aim_attribute *attr,
			      const char *buf,
			      size_t len)
{
	struct most_c_obj *c;
	struct most_aim **aim_ptr;
	char buffer[STRING_SIZE];
	char *mdev;
	char *mdev_ch;
	char *mdev_devnod;
	char devnod_buf[STRING_SIZE];
	int ret;
	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);

	strlcpy(buffer, buf, max_len);
	strlcpy(aim_obj->add_link, buf, max_len);

	ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
	if (ret)
		return ret;

	if (!mdev_devnod || *mdev_devnod == 0) {
		snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
			 mdev_ch);
		mdev_devnod = devnod_buf;
	}

	c = get_channel_by_name(mdev, mdev_ch);
	if (IS_ERR(c))
		return -ENODEV;

	if (!c->first_aim)
		aim_ptr = &c->first_aim;
	else if (!c->second_aim)
		aim_ptr = &c->second_aim;
	else
		return -ENOSPC;

	ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
					     &c->cfg, &c->kobj, mdev_devnod);
	if (ret)
		return ret;
	*aim_ptr = aim_obj->driver;
	return len;
}

static struct most_aim_attribute most_aim_attr_add_link =
	__ATTR(add_link, S_IRUGO | S_IWUSR, show_add_link, store_add_link);

static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
				struct most_aim_attribute *attr,
				char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->remove_link);
}

/**
 * store_remove_link - store function for remove_link attribute
 * @aim_obj: pointer to AIM object
 * @attr: its attributes
 * @buf: buffer
 * @len: buffer length
 *
 * Example:
 * echo -n -e "mdev0:ch0@ep_81\n" >remove_link
 */
static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
				 struct most_aim_attribute *attr,
				 const char *buf,
				 size_t len)
{
	struct most_c_obj *c;
	char buffer[STRING_SIZE];
	char *mdev;
	char *mdev_ch;
	int ret;
	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);

	strlcpy(buffer, buf, max_len);
	strlcpy(aim_obj->remove_link, buf, max_len);
	ret = split_string(buffer, &mdev, &mdev_ch, NULL);
	if (ret)
		return ret;

	c = get_channel_by_name(mdev, mdev_ch);
	if (IS_ERR(c))
		return -ENODEV;

	if (c->first_aim == aim_obj->driver)
		c->first_aim = NULL;
	if (c->second_aim == aim_obj->driver)
		c->second_aim = NULL;
	if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
		return -EIO;
	return len;
}

static struct most_aim_attribute most_aim_attr_remove_link =
	__ATTR(remove_link, S_IRUGO | S_IWUSR, show_remove_link,
	       store_remove_link);

static struct attribute *most_aim_def_attrs[] = {
	&most_aim_attr_add_link.attr,
	&most_aim_attr_remove_link.attr,
	NULL,
};

static struct kobj_type most_aim_ktype = {
	.sysfs_ops = &most_aim_sysfs_ops,
	.release = most_aim_release,
	.default_attrs = most_aim_def_attrs,
};

static struct kset *most_aim_kset;

/**
 * create_most_aim_obj - creates an AIM object
 * @name: name of the AIM
 *
 * This creates an AIM object, assigns the proper kset and registers
 * it with sysfs.
 * Returns a pointer to the object or NULL if something went wrong.
 */
static struct most_aim_obj *create_most_aim_obj(const char *name)
{
	struct most_aim_obj *most_aim;
	int retval;

	most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);
	if (!most_aim)
		return NULL;
	most_aim->kobj.kset = most_aim_kset;
	retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
				      NULL, "%s", name);
	if (retval) {
		kobject_put(&most_aim->kobj);
		return NULL;
	}
	kobject_uevent(&most_aim->kobj, KOBJ_ADD);
	return most_aim;
}

/**
 * destroy_most_aim_obj - AIM release function
 * @p: pointer to AIM object
 *
 * This decrements the reference counter of the AIM object. If the
 * reference count turns zero, its release function will be called.
 */
static void destroy_most_aim_obj(struct most_aim_obj *p)
{
	kobject_put(&p->kobj);
}

/* ___ ___
 * ___C O R E___
 */

/**
 * Instantiation of the MOST bus
 */
static struct bus_type most_bus = {
	.name = "most",
};

/**
 * Instantiation of the core driver
 */
static struct device_driver mostcore = {
	.name = "mostcore",
	.bus = &most_bus,
};

static inline void trash_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_c_obj *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add(&mbo->list, &c->trash_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
}

static struct mbo *get_hdm_mbo(struct most_c_obj *c)
{
	unsigned long flags;
	struct mbo *mbo;

	spin_lock_irqsave(&c->fifo_lock, flags);
	if (c->enqueue_halt || list_empty(&c->halt_fifo))
		mbo = NULL;
	else
		mbo = list_pop_mbo(&c->halt_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return mbo;
}

static void nq_hdm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_c_obj *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add_tail(&mbo->list, &c->halt_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	wake_up_interruptible(&c->hdm_fifo_wq);
}

static int hdm_enqueue_thread(void *data)
{
	struct most_c_obj *c = data;
	struct mbo *mbo;
	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;

	while (likely(!kthread_should_stop())) {
		wait_event_interruptible(c->hdm_fifo_wq,
					 (mbo = get_hdm_mbo(c)) ||
					 kthread_should_stop());

		if (unlikely(!mbo))
			continue;

		if (c->cfg.direction == MOST_CH_RX)
			mbo->buffer_length = c->cfg.buffer_size;

		if (unlikely(enqueue(mbo->ifp, mbo->hdm_channel_id, mbo))) {
			pr_err("hdm enqueue failed\n");
			nq_hdm_mbo(mbo);
			c->hdm_enqueue_task = NULL;
			return 0;
		}
	}

	return 0;
}
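
/*
 * nq_hdm_mbo() and hdm_enqueue_thread() form a producer/consumer pair:
 * producers append MBOs to halt_fifo and wake hdm_fifo_wq; the thread
 * pops them via get_hdm_mbo() (unless enqueue_halt is set, see
 * most_stop_enqueue()) and hands them to the HDM's enqueue() routine.
 */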

static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
{
	struct task_struct *task =
		kthread_run(&hdm_enqueue_thread, c, "hdm_fifo_%d", channel_id);

	if (IS_ERR(task))
		return PTR_ERR(task);

	c->hdm_enqueue_task = task;
	return 0;
}

/**
 * arm_mbo - recycle MBO for further usage
 * @mbo: buffer object
 *
 * This puts an MBO back to the list to have it ready for upcoming
 * tx transactions.
 *
 * In case the MBO belongs to a channel that recently has been
 * poisoned, the MBO is scheduled to be trashed.
 * Calls the completion handler of an attached AIM.
 */
static void arm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_c_obj *c;

	BUG_ON((!mbo) || (!mbo->context));
	c = mbo->context;

	if (c->is_poisoned) {
		trash_mbo(mbo);
		return;
	}

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add_tail(&mbo->list, &c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	if (c->second_aim && c->second_aim->tx_completion)
		c->second_aim->tx_completion(c->iface, c->channel_id);
	if (c->first_aim && c->first_aim->tx_completion)
		c->first_aim->tx_completion(c->iface, c->channel_id);
}

/**
 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
 * @c: pointer to interface channel
 * @dir: direction of the channel
 * @compl: pointer to completion function
 *
 * This allocates buffer objects including the containing DMA coherent
 * buffer and puts them in the fifo.
 * Buffers of Rx channels are put in the kthread fifo, hence immediately
 * submitted to the HDM.
 *
 * Returns the number of allocated and enqueued MBOs.
 */
static int arm_mbo_chain(struct most_c_obj *c, int dir,
			 void (*compl)(struct mbo *))
{
	unsigned int i;
	int retval;
	struct mbo *mbo;
	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	atomic_set(&c->mbo_nq_level, 0);

	for (i = 0; i < c->cfg.num_buffers; i++) {
		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
		if (!mbo) {
			pr_info("WARN: Allocation of MBO failed.\n");
			retval = i;
			goto _exit;
		}
		mbo->context = c;
		mbo->ifp = c->iface;
		mbo->hdm_channel_id = c->channel_id;
		mbo->virt_address = dma_alloc_coherent(NULL,
						       coherent_buf_size,
						       &mbo->bus_address,
						       GFP_KERNEL);
		if (!mbo->virt_address) {
			pr_info("WARN: No DMA coherent buffer.\n");
			retval = i;
			goto _error1;
		}
		mbo->complete = compl;
		if (dir == MOST_CH_RX) {
			nq_hdm_mbo(mbo);
			atomic_inc(&c->mbo_nq_level);
		} else {
			arm_mbo(mbo);
		}
	}
	return i;

_error1:
	kfree(mbo);
_exit:
	return retval;
}

/**
 * most_submit_mbo - submits an MBO to fifo
 * @mbo: pointer to the MBO
 */
int most_submit_mbo(struct mbo *mbo)
{
	struct most_c_obj *c;
	struct most_inst_obj *i;

	if (unlikely(!mbo || !mbo->context)) {
		pr_err("Bad MBO or missing channel reference\n");
		return -EINVAL;
	}
	c = mbo->context;
	i = c->inst;

	if (unlikely(atomic_read(&i->tainted)))
		return -ENODEV;

	nq_hdm_mbo(mbo);
	return 0;
}
EXPORT_SYMBOL_GPL(most_submit_mbo);

/**
 * most_write_completion - write completion handler
 * @mbo: pointer to MBO
 *
 * This recycles the MBO for further usage. In case the channel has been
 * poisoned, the MBO is scheduled to be trashed.
 */
static void most_write_completion(struct mbo *mbo)
{
	struct most_c_obj *c;

	BUG_ON((!mbo) || (!mbo->context));

	c = mbo->context;
	if (mbo->status == MBO_E_INVAL)
		pr_info("WARN: Tx MBO status: invalid\n");
	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
		trash_mbo(mbo);
	else
		arm_mbo(mbo);
}

/**
 * get_channel_by_iface - get pointer to channel object
 * @iface: pointer to interface instance
 * @id: channel ID
 *
 * This retrieves a pointer to a channel of the given interface and channel ID.
 */
static struct most_c_obj *
get_channel_by_iface(struct most_interface *iface, int id)
{
	struct most_inst_obj *i;

	if (unlikely(!iface)) {
		pr_err("Bad interface\n");
		return NULL;
	}
	if (unlikely((id < 0) || (id >= iface->num_channels))) {
		pr_err("Channel index (%d) out of range\n", id);
		return NULL;
	}
	i = iface->priv;
	if (unlikely(!i)) {
		pr_err("interface is not registered\n");
		return NULL;
	}
	return i->channel[id];
}

/**
 * most_get_mbo - get pointer to an MBO of pool
 * @iface: pointer to interface instance
 * @id: channel ID
 *
 * This attempts to get a free buffer out of the channel fifo.
 * Returns a pointer to MBO on success or NULL otherwise.
 */
struct mbo *most_get_mbo(struct most_interface *iface, int id)
{
	struct mbo *mbo;
	struct most_c_obj *c;
	unsigned long flags;

	c = get_channel_by_iface(iface, id);
	if (unlikely(!c))
		return NULL;
	spin_lock_irqsave(&c->fifo_lock, flags);
	if (list_empty(&c->fifo)) {
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		return NULL;
	}
	mbo = list_pop_mbo(&c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	mbo->buffer_length = c->cfg.buffer_size;
	return mbo;
}
EXPORT_SYMBOL_GPL(most_get_mbo);
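
/*
 * Typical AIM-side TX path (a minimal sketch; assumes a started TX
 * channel and omits error handling):
 *
 *	struct mbo *mbo = most_get_mbo(iface, channel_id);
 *
 *	if (mbo) {
 *		memcpy(mbo->virt_address, data, len);
 *		mbo->buffer_length = len;
 *		most_submit_mbo(mbo);
 *	}
 */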

/**
 * most_put_mbo - return buffer to pool
 * @mbo: buffer object
 */
void most_put_mbo(struct mbo *mbo)
{
	struct most_c_obj *c;
	struct most_inst_obj *i;

	c = mbo->context;
	i = c->inst;

	if (unlikely(atomic_read(&i->tainted))) {
		mbo->status = MBO_E_CLOSE;
		trash_mbo(mbo);
		return;
	}
	if (c->cfg.direction == MOST_CH_TX) {
		arm_mbo(mbo);
		return;
	}
	nq_hdm_mbo(mbo);
	atomic_inc(&c->mbo_nq_level);
}
EXPORT_SYMBOL_GPL(most_put_mbo);

/**
 * most_read_completion - read completion handler
 * @mbo: pointer to MBO
 *
 * This function is called by the HDM when data has been received from the
 * hardware and copied to the buffer of the MBO.
 *
 * In case the channel has been poisoned it puts the buffer in the trash queue.
 * Otherwise, it passes the buffer to an AIM for further processing.
 */
static void most_read_completion(struct mbo *mbo)
{
	struct most_c_obj *c;

	c = mbo->context;
	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
		goto release_mbo;

	if (mbo->status == MBO_E_INVAL) {
		nq_hdm_mbo(mbo);
		atomic_inc(&c->mbo_nq_level);
		return;
	}

	if (atomic_sub_and_test(1, &c->mbo_nq_level)) {
		pr_info("WARN: rx device out of buffers\n");
		c->is_starving = 1;
	}

	if (c->first_aim && c->first_aim->rx_completion &&
	    c->first_aim->rx_completion(mbo) == 0)
		return;
	if (c->second_aim && c->second_aim->rx_completion &&
	    c->second_aim->rx_completion(mbo) == 0)
		return;
	pr_info("WARN: no driver linked with this channel\n");
	mbo->status = MBO_E_CLOSE;
release_mbo:
	trash_mbo(mbo);
}

/**
 * most_start_channel - prepares a channel for communication
 * @iface: pointer to interface instance
 * @id: channel ID
 *
 * This prepares the channel for usage. Cross-checks whether the
 * channel's been properly configured.
 *
 * Returns 0 on success or error code otherwise.
 */
int most_start_channel(struct most_interface *iface, int id)
{
	int num_buffer;
	int ret;
	struct most_c_obj *c = get_channel_by_iface(iface, id);

	if (unlikely(!c))
		return -EINVAL;

	if (c->is_started)
		return -EBUSY;

	if (!try_module_get(iface->mod)) {
		pr_info("failed to acquire HDM lock\n");
		return -ENOLCK;
	}
	modref++;

	c->cfg.extra_len = 0;
	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
		pr_info("channel configuration failed. Go check settings...\n");
		ret = -EINVAL;
		goto error;
	}

	init_waitqueue_head(&c->hdm_fifo_wq);

	if (c->cfg.direction == MOST_CH_RX)
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_read_completion);
	else
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_write_completion);
	if (unlikely(num_buffer == 0)) {
		pr_info("failed to allocate memory\n");
		ret = -ENOMEM;
		goto error;
	}

	ret = run_enqueue_thread(c, id);
	if (ret)
		goto error;

	c->is_started = true;
	c->is_starving = 0;
	atomic_set(&c->mbo_ref, num_buffer);
	return 0;
error:
	if (iface->mod)
		module_put(iface->mod);
	modref--;
	return ret;
}
EXPORT_SYMBOL_GPL(most_start_channel);
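
/*
 * Channel lifecycle as seen by an AIM (illustrative sketch):
 *
 *	most_start_channel(iface, id);   - configures the HDM and arms
 *					   the MBO pool
 *	...exchange buffers via most_get_mbo()/most_put_mbo()...
 *	most_stop_channel(iface, id);    - poisons the channel and drains
 *					   the fifos
 */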

/**
 * most_stop_channel - stops a running channel
 * @iface: pointer to interface instance
 * @id: channel ID
 */
int most_stop_channel(struct most_interface *iface, int id)
{
	struct most_c_obj *c;

	if (unlikely(!iface || (id >= iface->num_channels) || (id < 0))) {
		pr_err("Bad interface or index out of range\n");
		return -EINVAL;
	}
	c = get_channel_by_iface(iface, id);
	if (unlikely(!c))
		return -EINVAL;

	if (!c->is_started)
		return 0;

	/* FIXME: we need to know calling AIM to reset only one link */
	c->first_aim = NULL;
	c->second_aim = NULL;
	/* do not go into recursion calling aim->disconnect_channel */

	mutex_lock(&c->stop_task_mutex);
	if (c->hdm_enqueue_task)
		kthread_stop(c->hdm_enqueue_task);
	c->hdm_enqueue_task = NULL;
	mutex_unlock(&c->stop_task_mutex);

	mutex_lock(&deregister_mutex);
	if (atomic_read(&c->inst->tainted)) {
		mutex_unlock(&deregister_mutex);
		return -ENODEV;
	}
	mutex_unlock(&deregister_mutex);

	if (iface->mod && modref) {
		module_put(iface->mod);
		modref--;
	}

	c->is_poisoned = true;
	if (c->iface->poison_channel(c->iface, c->channel_id)) {
		pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
		       c->iface->description);
		return -EAGAIN;
	}
	flush_trash_fifo(c);
	flush_channel_fifos(c);

#ifdef CMPL_INTERRUPTIBLE
	if (wait_for_completion_interruptible(&c->cleanup)) {
		pr_info("Interrupted while cleaning up ch %d\n", c->channel_id);
		return -EINTR;
	}
#else
	wait_for_completion(&c->cleanup);
#endif
	c->is_poisoned = false;
	c->is_started = false;
	return 0;
}
EXPORT_SYMBOL_GPL(most_stop_channel);

/**
 * most_register_aim - registers an AIM (driver) with the core
 * @aim: instance of AIM to be registered
 */
int most_register_aim(struct most_aim *aim)
{
	struct most_aim_obj *aim_obj;

	if (!aim) {
		pr_err("Bad driver\n");
		return -EINVAL;
	}
	aim_obj = create_most_aim_obj(aim->name);
	if (!aim_obj) {
		pr_info("failed to alloc driver object\n");
		return -ENOMEM;
	}
	aim_obj->driver = aim;
	aim->context = aim_obj;
	pr_info("registered new application interfacing module %s\n",
		aim->name);
	list_add_tail(&aim_obj->list, &aim_list);
	return 0;
}
EXPORT_SYMBOL_GPL(most_register_aim);
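
/*
 * Minimal AIM registration (a hypothetical module; only the callbacks
 * that this core actually invokes are shown):
 *
 *	static struct most_aim my_aim = {
 *		.name = "my_aim",
 *		.probe_channel = my_probe_channel,
 *		.disconnect_channel = my_disconnect_channel,
 *		.rx_completion = my_rx_completion,
 *		.tx_completion = my_tx_completion,
 *	};
 *
 *	most_register_aim(&my_aim);
 */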

/**
 * most_deregister_aim - deregisters an AIM (driver) from the core
 * @aim: AIM to be removed
 */
int most_deregister_aim(struct most_aim *aim)
{
	struct most_aim_obj *aim_obj;
	struct most_c_obj *c, *tmp;
	struct most_inst_obj *i, *i_tmp;

	if (!aim) {
		pr_err("Bad driver\n");
		return -EINVAL;
	}

	aim_obj = aim->context;
	if (!aim_obj) {
		pr_info("driver not registered.\n");
		return -EINVAL;
	}
	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
		list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
			if (c->first_aim == aim || c->second_aim == aim)
				aim->disconnect_channel(c->iface,
							c->channel_id);
			if (c->first_aim == aim)
				c->first_aim = NULL;
			if (c->second_aim == aim)
				c->second_aim = NULL;
		}
	}
	list_del(&aim_obj->list);
	destroy_most_aim_obj(aim_obj);
	pr_info("deregistering application interfacing module %s\n",
		aim->name);
	return 0;
}
EXPORT_SYMBOL_GPL(most_deregister_aim);

/**
 * most_register_interface - registers an interface with core
 * @iface: pointer to the instance of the interface description.
 *
 * Allocates and initializes a new interface instance and all of its channels.
 * Returns a pointer to kobject or an error pointer.
 */
struct kobject *most_register_interface(struct most_interface *iface)
{
	unsigned int i;
	int id;
	char name[STRING_SIZE];
	char channel_name[STRING_SIZE];
	struct most_c_obj *c;
	struct most_inst_obj *inst;

	if (!iface || !iface->enqueue || !iface->configure ||
	    !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
		pr_err("Bad interface or channel overflow\n");
		return ERR_PTR(-EINVAL);
	}

	id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
	if (id < 0) {
		pr_info("Failed to alloc mdev ID\n");
		return ERR_PTR(id);
	}
	snprintf(name, STRING_SIZE, "mdev%d", id);

	inst = create_most_inst_obj(name);
	if (!inst) {
		pr_info("Failed to allocate interface instance\n");
		ida_simple_remove(&mdev_id, id); /* don't leak the mdev ID */
		return ERR_PTR(-ENOMEM);
	}

	iface->priv = inst;
	INIT_LIST_HEAD(&inst->channel_list);
	inst->iface = iface;
	inst->dev_id = id;
	atomic_set(&inst->tainted, 0);
	list_add_tail(&inst->list, &instance_list);

	for (i = 0; i < iface->num_channels; i++) {
		const char *name_suffix = iface->channel_vector[i].name_suffix;

		if (!name_suffix)
			snprintf(channel_name, STRING_SIZE, "ch%d", i);
		else if (name_suffix[0] == '@')
			snprintf(channel_name, STRING_SIZE, "ch%d%s", i,
				 name_suffix);
		else
			snprintf(channel_name, STRING_SIZE, "%s", name_suffix);

		/* this increments the reference count of this instance */
		c = create_most_c_obj(channel_name, &inst->kobj);
		if (!c)
			goto free_instance;
		inst->channel[i] = c;
		c->is_starving = 0;
		c->iface = iface;
		c->inst = inst;
		c->channel_id = i;
		c->keep_mbo = false;
		c->enqueue_halt = false;
		c->is_poisoned = false;
		c->is_started = false;
		c->cfg.direction = 0;
		c->cfg.data_type = 0;
		c->cfg.num_buffers = 0;
		c->cfg.buffer_size = 0;
		c->cfg.subbuffer_size = 0;
		c->cfg.packets_per_xact = 0;
		spin_lock_init(&c->fifo_lock);
		INIT_LIST_HEAD(&c->fifo);
		INIT_LIST_HEAD(&c->trash_fifo);
		INIT_LIST_HEAD(&c->halt_fifo);
		init_completion(&c->cleanup);
		atomic_set(&c->mbo_ref, 0);
		mutex_init(&c->stop_task_mutex);
		list_add_tail(&c->list, &inst->channel_list);
	}
	pr_info("registered new MOST device mdev%d (%s)\n",
		inst->dev_id, iface->description);
	return &inst->kobj;

free_instance:
	pr_info("Failed to allocate channel(s)\n");
	list_del(&inst->list);
	ida_simple_remove(&mdev_id, id); /* don't leak the mdev ID */
	destroy_most_inst_obj(inst);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(most_register_interface);
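
/*
 * HDM-side registration (a minimal sketch with hypothetical callbacks;
 * enqueue, configure and poison_channel are mandatory, as checked above):
 *
 *	static struct most_interface my_iface = {
 *		.interface = ITYPE_LOOPBACK,
 *		.description = "my_hdm",
 *		.num_channels = 1,
 *		.channel_vector = my_channel_caps,
 *		.enqueue = my_enqueue,
 *		.configure = my_configure,
 *		.poison_channel = my_poison_channel,
 *	};
 *
 *	struct kobject *kobj = most_register_interface(&my_iface);
 */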

/**
 * most_deregister_interface - deregisters an interface with core
 * @iface: pointer to the interface instance description.
 *
 * Before removing an interface instance from the list, all running
 * channels are stopped and poisoned.
 */
void most_deregister_interface(struct most_interface *iface)
{
	struct most_inst_obj *i = iface->priv;
	struct most_c_obj *c;

	mutex_lock(&deregister_mutex);
	if (unlikely(!i)) {
		pr_info("Bad Interface\n");
		mutex_unlock(&deregister_mutex);
		return;
	}
	pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
		iface->description);

	atomic_set(&i->tainted, 1);
	mutex_unlock(&deregister_mutex);

	while (modref) {
		if (iface->mod && modref)
			module_put(iface->mod);
		modref--;
	}

	list_for_each_entry(c, &i->channel_list, list) {
		if (!c->is_started)
			continue;

		mutex_lock(&c->stop_task_mutex);
		if (c->hdm_enqueue_task)
			kthread_stop(c->hdm_enqueue_task);
		c->hdm_enqueue_task = NULL;
		mutex_unlock(&c->stop_task_mutex);

		if (iface->poison_channel(iface, c->channel_id))
			pr_err("Can't poison channel %d\n", c->channel_id);
	}
	ida_simple_remove(&mdev_id, i->dev_id);
	list_del(&i->list);
	destroy_most_inst_obj(i);
}
EXPORT_SYMBOL_GPL(most_deregister_interface);

/**
 * most_stop_enqueue - prevents core from enqueueing MBOs
 * @iface: pointer to interface
 * @id: channel id
 *
 * This is called by an HDM that _cannot_ attend to its duties and
 * is about to be overrun by the core. The core is not going to
 * enqueue any further packets unless the flagging HDM calls
 * most_resume_enqueue().
 */
void most_stop_enqueue(struct most_interface *iface, int id)
{
	struct most_c_obj *c = get_channel_by_iface(iface, id);

	if (likely(c))
		c->enqueue_halt = true;
}
EXPORT_SYMBOL_GPL(most_stop_enqueue);

/**
 * most_resume_enqueue - allow core to enqueue MBOs again
 * @iface: pointer to interface
 * @id: channel id
 *
 * This clears the enqueue halt flag and enqueues all MBOs currently
 * sitting in the wait fifo.
 */
void most_resume_enqueue(struct most_interface *iface, int id)
{
	struct most_c_obj *c = get_channel_by_iface(iface, id);

	if (unlikely(!c))
		return;
	c->enqueue_halt = false;

	wake_up_interruptible(&c->hdm_fifo_wq);
}
EXPORT_SYMBOL_GPL(most_resume_enqueue);
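
/*
 * Flow-control pairing (illustrative): an HDM that temporarily runs out
 * of hardware resources silences the core and re-enables it later, e.g.
 * from its completion handler:
 *
 *	most_stop_enqueue(&my_iface, channel_id);
 *	...hardware catches up...
 *	most_resume_enqueue(&my_iface, channel_id);
 */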

static int __init most_init(void)
{
	pr_info("init()\n");
	INIT_LIST_HEAD(&instance_list);
	INIT_LIST_HEAD(&aim_list);
	mutex_init(&deregister_mutex);
	ida_init(&mdev_id);

	if (bus_register(&most_bus)) {
		pr_info("Cannot register most bus\n");
		goto exit;
	}

	most_class = class_create(THIS_MODULE, "most");
	if (IS_ERR(most_class)) {
		pr_info("No udev support.\n");
		goto exit_bus;
	}
	if (driver_register(&mostcore)) {
		pr_info("Cannot register core driver\n");
		goto exit_class;
	}

	/* device_create() returns an ERR_PTR on failure, never NULL */
	class_glue_dir =
		device_create(most_class, NULL, 0, NULL, "mostcore");
	if (IS_ERR(class_glue_dir))
		goto exit_driver;

	most_aim_kset =
		kset_create_and_add("aims", NULL, &class_glue_dir->kobj);
	if (!most_aim_kset)
		goto exit_class_container;

	most_inst_kset =
		kset_create_and_add("devices", NULL, &class_glue_dir->kobj);
	if (!most_inst_kset)
		goto exit_driver_kset;

	return 0;

exit_driver_kset:
	kset_unregister(most_aim_kset);
exit_class_container:
	device_destroy(most_class, 0);
exit_driver:
	driver_unregister(&mostcore);
exit_class:
	class_destroy(most_class);
exit_bus:
	bus_unregister(&most_bus);
exit:
	return -ENOMEM;
}

static void __exit most_exit(void)
{
	struct most_inst_obj *i, *i_tmp;
	struct most_aim_obj *d, *d_tmp;

	pr_info("exit core module\n");
	list_for_each_entry_safe(d, d_tmp, &aim_list, list) {
		/* unlink before the final kobject_put() frees the object */
		list_del(&d->list);
		destroy_most_aim_obj(d);
	}

	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
		list_del(&i->list);
		destroy_most_inst_obj(i);
	}
	kset_unregister(most_inst_kset);
	kset_unregister(most_aim_kset);
	device_destroy(most_class, 0);
	driver_unregister(&mostcore);
	class_destroy(most_class);
	bus_unregister(&most_bus);
	ida_destroy(&mdev_id);
}

module_init(most_init);
module_exit(most_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");