/*
 * core.c - Implementation of core module of MOST Linux driver stack
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This file is licensed under GPLv2.
 */
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/device.h>
20 #include <linux/list.h>
21 #include <linux/poll.h>
22 #include <linux/wait.h>
23 #include <linux/kobject.h>
24 #include <linux/mutex.h>
25 #include <linux/completion.h>
26 #include <linux/sysfs.h>
27 #include <linux/kthread.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/idr.h>
#define MAX_CHANNELS	64	/* max channels per interface instance (size of channel[]) */
#define STRING_SIZE	80	/* size of the add_link/remove_link string buffers */
35 static struct class *most_class
;
36 static struct device
*class_glue_dir
;
37 static struct ida mdev_id
;
42 struct completion cleanup
;
44 atomic_t mbo_nq_level
;
49 struct most_interface
*iface
;
50 struct most_inst_obj
*inst
;
51 struct most_channel_config cfg
;
54 struct list_head fifo
;
56 struct list_head halt_fifo
;
57 struct list_head list
;
58 struct most_aim
*first_aim
;
59 struct most_aim
*second_aim
;
60 struct list_head trash_fifo
;
61 struct task_struct
*hdm_enqueue_task
;
62 struct mutex stop_task_mutex
;
63 wait_queue_head_t hdm_fifo_wq
;
65 #define to_c_obj(d) container_of(d, struct most_c_obj, kobj)
67 struct most_inst_obj
{
70 struct most_interface
*iface
;
71 struct list_head channel_list
;
72 struct most_c_obj
*channel
[MAX_CHANNELS
];
74 struct list_head list
;
76 #define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
79 * list_pop_mbo - retrieves the first MBO of the list and removes it
80 * @ptr: the list head to grab the MBO from.
82 #define list_pop_mbo(ptr) \
84 struct mbo *_mbo = list_first_entry(ptr, struct mbo, list); \
85 list_del(&_mbo->list); \
89 static struct mutex deregister_mutex
;
96 * struct most_c_attr - to access the attributes of a channel object
97 * @attr: attributes of a channel
98 * @show: pointer to the show function
99 * @store: pointer to the store function
102 struct attribute attr
;
103 ssize_t (*show
)(struct most_c_obj
*d
,
104 struct most_c_attr
*attr
,
106 ssize_t (*store
)(struct most_c_obj
*d
,
107 struct most_c_attr
*attr
,
111 #define to_channel_attr(a) container_of(a, struct most_c_attr, attr)
113 #define MOST_CHNL_ATTR(_name, _mode, _show, _store) \
114 struct most_c_attr most_chnl_attr_##_name = \
115 __ATTR(_name, _mode, _show, _store)
118 * channel_attr_show - show function of channel object
119 * @kobj: pointer to its kobject
120 * @attr: pointer to its attributes
123 static ssize_t
channel_attr_show(struct kobject
*kobj
, struct attribute
*attr
,
126 struct most_c_attr
*channel_attr
= to_channel_attr(attr
);
127 struct most_c_obj
*c_obj
= to_c_obj(kobj
);
129 if (!channel_attr
->show
)
132 return channel_attr
->show(c_obj
, channel_attr
, buf
);
136 * channel_attr_store - store function of channel object
137 * @kobj: pointer to its kobject
138 * @attr: pointer to its attributes
140 * @len: length of buffer
142 static ssize_t
channel_attr_store(struct kobject
*kobj
,
143 struct attribute
*attr
,
147 struct most_c_attr
*channel_attr
= to_channel_attr(attr
);
148 struct most_c_obj
*c_obj
= to_c_obj(kobj
);
150 if (!channel_attr
->store
)
152 return channel_attr
->store(c_obj
, channel_attr
, buf
, len
);
155 static const struct sysfs_ops most_channel_sysfs_ops
= {
156 .show
= channel_attr_show
,
157 .store
= channel_attr_store
,
161 * most_free_mbo_coherent - free an MBO and its coherent buffer
162 * @mbo: buffer to be released
165 static void most_free_mbo_coherent(struct mbo
*mbo
)
167 struct most_c_obj
*c
= mbo
->context
;
168 u16
const coherent_buf_size
= c
->cfg
.buffer_size
+ c
->cfg
.extra_len
;
170 dma_free_coherent(NULL
, coherent_buf_size
, mbo
->virt_address
,
173 if (atomic_sub_and_test(1, &c
->mbo_ref
))
174 complete(&c
->cleanup
);
178 * flush_channel_fifos - clear the channel fifos
179 * @c: pointer to channel object
181 static void flush_channel_fifos(struct most_c_obj
*c
)
183 unsigned long flags
, hf_flags
;
184 struct mbo
*mbo
, *tmp
;
186 if (list_empty(&c
->fifo
) && list_empty(&c
->halt_fifo
))
189 spin_lock_irqsave(&c
->fifo_lock
, flags
);
190 list_for_each_entry_safe(mbo
, tmp
, &c
->fifo
, list
) {
191 list_del(&mbo
->list
);
192 spin_unlock_irqrestore(&c
->fifo_lock
, flags
);
194 most_free_mbo_coherent(mbo
);
195 spin_lock_irqsave(&c
->fifo_lock
, flags
);
197 spin_unlock_irqrestore(&c
->fifo_lock
, flags
);
199 spin_lock_irqsave(&c
->fifo_lock
, hf_flags
);
200 list_for_each_entry_safe(mbo
, tmp
, &c
->halt_fifo
, list
) {
201 list_del(&mbo
->list
);
202 spin_unlock_irqrestore(&c
->fifo_lock
, hf_flags
);
204 most_free_mbo_coherent(mbo
);
205 spin_lock_irqsave(&c
->fifo_lock
, hf_flags
);
207 spin_unlock_irqrestore(&c
->fifo_lock
, hf_flags
);
209 if (unlikely((!list_empty(&c
->fifo
) || !list_empty(&c
->halt_fifo
))))
210 pr_info("WARN: fifo | trash fifo not empty\n");
214 * flush_trash_fifo - clear the trash fifo
215 * @c: pointer to channel object
217 static int flush_trash_fifo(struct most_c_obj
*c
)
219 struct mbo
*mbo
, *tmp
;
222 spin_lock_irqsave(&c
->fifo_lock
, flags
);
223 list_for_each_entry_safe(mbo
, tmp
, &c
->trash_fifo
, list
) {
224 list_del(&mbo
->list
);
225 spin_unlock_irqrestore(&c
->fifo_lock
, flags
);
226 most_free_mbo_coherent(mbo
);
227 spin_lock_irqsave(&c
->fifo_lock
, flags
);
229 spin_unlock_irqrestore(&c
->fifo_lock
, flags
);
234 * most_channel_release - release function of channel object
235 * @kobj: pointer to channel's kobject
237 static void most_channel_release(struct kobject
*kobj
)
239 struct most_c_obj
*c
= to_c_obj(kobj
);
244 static ssize_t
show_available_directions(struct most_c_obj
*c
,
245 struct most_c_attr
*attr
,
248 unsigned int i
= c
->channel_id
;
251 if (c
->iface
->channel_vector
[i
].direction
& MOST_CH_RX
)
252 strcat(buf
, "dir_rx ");
253 if (c
->iface
->channel_vector
[i
].direction
& MOST_CH_TX
)
254 strcat(buf
, "dir_tx ");
256 return strlen(buf
) + 1;
259 static ssize_t
show_available_datatypes(struct most_c_obj
*c
,
260 struct most_c_attr
*attr
,
263 unsigned int i
= c
->channel_id
;
266 if (c
->iface
->channel_vector
[i
].data_type
& MOST_CH_CONTROL
)
267 strcat(buf
, "control ");
268 if (c
->iface
->channel_vector
[i
].data_type
& MOST_CH_ASYNC
)
269 strcat(buf
, "async ");
270 if (c
->iface
->channel_vector
[i
].data_type
& MOST_CH_SYNC
)
271 strcat(buf
, "sync ");
272 if (c
->iface
->channel_vector
[i
].data_type
& MOST_CH_ISOC_AVP
)
273 strcat(buf
, "isoc_avp ");
275 return strlen(buf
) + 1;
279 ssize_t
show_number_of_packet_buffers(struct most_c_obj
*c
,
280 struct most_c_attr
*attr
,
283 unsigned int i
= c
->channel_id
;
285 return snprintf(buf
, PAGE_SIZE
, "%d\n",
286 c
->iface
->channel_vector
[i
].num_buffers_packet
);
290 ssize_t
show_number_of_stream_buffers(struct most_c_obj
*c
,
291 struct most_c_attr
*attr
,
294 unsigned int i
= c
->channel_id
;
296 return snprintf(buf
, PAGE_SIZE
, "%d\n",
297 c
->iface
->channel_vector
[i
].num_buffers_streaming
);
301 ssize_t
show_size_of_packet_buffer(struct most_c_obj
*c
,
302 struct most_c_attr
*attr
,
305 unsigned int i
= c
->channel_id
;
307 return snprintf(buf
, PAGE_SIZE
, "%d\n",
308 c
->iface
->channel_vector
[i
].buffer_size_packet
);
312 ssize_t
show_size_of_stream_buffer(struct most_c_obj
*c
,
313 struct most_c_attr
*attr
,
316 unsigned int i
= c
->channel_id
;
318 return snprintf(buf
, PAGE_SIZE
, "%d\n",
319 c
->iface
->channel_vector
[i
].buffer_size_streaming
);
322 static ssize_t
show_channel_starving(struct most_c_obj
*c
,
323 struct most_c_attr
*attr
,
326 return snprintf(buf
, PAGE_SIZE
, "%d\n", c
->is_starving
);
/* declare a read-only channel attribute backed by show_<name>() */
#define create_show_channel_attribute(val) \
	static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL)

create_show_channel_attribute(available_directions);
create_show_channel_attribute(available_datatypes);
create_show_channel_attribute(number_of_packet_buffers);
create_show_channel_attribute(number_of_stream_buffers);
create_show_channel_attribute(size_of_stream_buffer);
create_show_channel_attribute(size_of_packet_buffer);
create_show_channel_attribute(channel_starving);
341 static ssize_t
show_set_number_of_buffers(struct most_c_obj
*c
,
342 struct most_c_attr
*attr
,
345 return snprintf(buf
, PAGE_SIZE
, "%d\n", c
->cfg
.num_buffers
);
348 static ssize_t
store_set_number_of_buffers(struct most_c_obj
*c
,
349 struct most_c_attr
*attr
,
353 int ret
= kstrtou16(buf
, 0, &c
->cfg
.num_buffers
);
360 static ssize_t
show_set_buffer_size(struct most_c_obj
*c
,
361 struct most_c_attr
*attr
,
364 return snprintf(buf
, PAGE_SIZE
, "%d\n", c
->cfg
.buffer_size
);
367 static ssize_t
store_set_buffer_size(struct most_c_obj
*c
,
368 struct most_c_attr
*attr
,
372 int ret
= kstrtou16(buf
, 0, &c
->cfg
.buffer_size
);
379 static ssize_t
show_set_direction(struct most_c_obj
*c
,
380 struct most_c_attr
*attr
,
383 if (c
->cfg
.direction
& MOST_CH_TX
)
384 return snprintf(buf
, PAGE_SIZE
, "dir_tx\n");
385 else if (c
->cfg
.direction
& MOST_CH_RX
)
386 return snprintf(buf
, PAGE_SIZE
, "dir_rx\n");
387 return snprintf(buf
, PAGE_SIZE
, "unconfigured\n");
390 static ssize_t
store_set_direction(struct most_c_obj
*c
,
391 struct most_c_attr
*attr
,
395 if (!strcmp(buf
, "dir_rx\n"))
396 c
->cfg
.direction
= MOST_CH_RX
;
397 else if (!strcmp(buf
, "dir_tx\n"))
398 c
->cfg
.direction
= MOST_CH_TX
;
400 pr_info("WARN: invalid attribute settings\n");
406 static ssize_t
show_set_datatype(struct most_c_obj
*c
,
407 struct most_c_attr
*attr
,
410 if (c
->cfg
.data_type
& MOST_CH_CONTROL
)
411 return snprintf(buf
, PAGE_SIZE
, "control\n");
412 else if (c
->cfg
.data_type
& MOST_CH_ASYNC
)
413 return snprintf(buf
, PAGE_SIZE
, "async\n");
414 else if (c
->cfg
.data_type
& MOST_CH_SYNC
)
415 return snprintf(buf
, PAGE_SIZE
, "sync\n");
416 else if (c
->cfg
.data_type
& MOST_CH_ISOC_AVP
)
417 return snprintf(buf
, PAGE_SIZE
, "isoc_avp\n");
418 return snprintf(buf
, PAGE_SIZE
, "unconfigured\n");
421 static ssize_t
store_set_datatype(struct most_c_obj
*c
,
422 struct most_c_attr
*attr
,
426 if (!strcmp(buf
, "control\n"))
427 c
->cfg
.data_type
= MOST_CH_CONTROL
;
428 else if (!strcmp(buf
, "async\n"))
429 c
->cfg
.data_type
= MOST_CH_ASYNC
;
430 else if (!strcmp(buf
, "sync\n"))
431 c
->cfg
.data_type
= MOST_CH_SYNC
;
432 else if (!strcmp(buf
, "isoc_avp\n"))
433 c
->cfg
.data_type
= MOST_CH_ISOC_AVP
;
435 pr_info("WARN: invalid attribute settings\n");
441 static ssize_t
show_set_subbuffer_size(struct most_c_obj
*c
,
442 struct most_c_attr
*attr
,
445 return snprintf(buf
, PAGE_SIZE
, "%d\n", c
->cfg
.subbuffer_size
);
448 static ssize_t
store_set_subbuffer_size(struct most_c_obj
*c
,
449 struct most_c_attr
*attr
,
453 int ret
= kstrtou16(buf
, 0, &c
->cfg
.subbuffer_size
);
460 static ssize_t
show_set_packets_per_xact(struct most_c_obj
*c
,
461 struct most_c_attr
*attr
,
464 return snprintf(buf
, PAGE_SIZE
, "%d\n", c
->cfg
.packets_per_xact
);
467 static ssize_t
store_set_packets_per_xact(struct most_c_obj
*c
,
468 struct most_c_attr
*attr
,
472 int ret
= kstrtou16(buf
, 0, &c
->cfg
.packets_per_xact
);
479 #define create_channel_attribute(value) \
480 static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \
484 create_channel_attribute(set_buffer_size
);
485 create_channel_attribute(set_number_of_buffers
);
486 create_channel_attribute(set_direction
);
487 create_channel_attribute(set_datatype
);
488 create_channel_attribute(set_subbuffer_size
);
489 create_channel_attribute(set_packets_per_xact
);
493 * most_channel_def_attrs - array of default attributes of channel object
495 static struct attribute
*most_channel_def_attrs
[] = {
496 &most_chnl_attr_available_directions
.attr
,
497 &most_chnl_attr_available_datatypes
.attr
,
498 &most_chnl_attr_number_of_packet_buffers
.attr
,
499 &most_chnl_attr_number_of_stream_buffers
.attr
,
500 &most_chnl_attr_size_of_packet_buffer
.attr
,
501 &most_chnl_attr_size_of_stream_buffer
.attr
,
502 &most_chnl_attr_set_number_of_buffers
.attr
,
503 &most_chnl_attr_set_buffer_size
.attr
,
504 &most_chnl_attr_set_direction
.attr
,
505 &most_chnl_attr_set_datatype
.attr
,
506 &most_chnl_attr_set_subbuffer_size
.attr
,
507 &most_chnl_attr_set_packets_per_xact
.attr
,
508 &most_chnl_attr_channel_starving
.attr
,
512 static struct kobj_type most_channel_ktype
= {
513 .sysfs_ops
= &most_channel_sysfs_ops
,
514 .release
= most_channel_release
,
515 .default_attrs
= most_channel_def_attrs
,
/* kset grouping all channel kobjects */
static struct kset *most_channel_kset;
521 * create_most_c_obj - allocates a channel object
522 * @name: name of the channel object
523 * @parent: parent kobject
525 * This create a channel object and registers it with sysfs.
526 * Returns a pointer to the object or NULL when something went wrong.
528 static struct most_c_obj
*
529 create_most_c_obj(const char *name
, struct kobject
*parent
)
531 struct most_c_obj
*c
;
534 c
= kzalloc(sizeof(*c
), GFP_KERNEL
);
537 c
->kobj
.kset
= most_channel_kset
;
538 retval
= kobject_init_and_add(&c
->kobj
, &most_channel_ktype
, parent
,
541 kobject_put(&c
->kobj
);
544 kobject_uevent(&c
->kobj
, KOBJ_ADD
);
549 * destroy_most_c_obj - channel release function
550 * @c: pointer to channel object
552 * This decrements the reference counter of the channel object.
553 * If the reference count turns zero, its release function is called.
555 static void destroy_most_c_obj(struct most_c_obj
*c
)
558 c
->first_aim
->disconnect_channel(c
->iface
, c
->channel_id
);
560 c
->second_aim
->disconnect_channel(c
->iface
, c
->channel_id
);
562 c
->second_aim
= NULL
;
564 mutex_lock(&deregister_mutex
);
566 flush_channel_fifos(c
);
567 mutex_unlock(&deregister_mutex
);
568 kobject_put(&c
->kobj
);
572 * ___I N S T A N C E___
574 #define MOST_INST_ATTR(_name, _mode, _show, _store) \
575 struct most_inst_attribute most_inst_attr_##_name = \
576 __ATTR(_name, _mode, _show, _store)
578 static struct list_head instance_list
;
581 * struct most_inst_attribute - to access the attributes of instance object
582 * @attr: attributes of an instance
583 * @show: pointer to the show function
584 * @store: pointer to the store function
586 struct most_inst_attribute
{
587 struct attribute attr
;
588 ssize_t (*show
)(struct most_inst_obj
*d
,
589 struct most_inst_attribute
*attr
,
591 ssize_t (*store
)(struct most_inst_obj
*d
,
592 struct most_inst_attribute
*attr
,
596 #define to_instance_attr(a) \
597 container_of(a, struct most_inst_attribute, attr)
600 * instance_attr_show - show function for an instance object
601 * @kobj: pointer to kobject
602 * @attr: pointer to attribute struct
605 static ssize_t
instance_attr_show(struct kobject
*kobj
,
606 struct attribute
*attr
,
609 struct most_inst_attribute
*instance_attr
;
610 struct most_inst_obj
*instance_obj
;
612 instance_attr
= to_instance_attr(attr
);
613 instance_obj
= to_inst_obj(kobj
);
615 if (!instance_attr
->show
)
618 return instance_attr
->show(instance_obj
, instance_attr
, buf
);
622 * instance_attr_store - store function for an instance object
623 * @kobj: pointer to kobject
624 * @attr: pointer to attribute struct
626 * @len: length of buffer
628 static ssize_t
instance_attr_store(struct kobject
*kobj
,
629 struct attribute
*attr
,
633 struct most_inst_attribute
*instance_attr
;
634 struct most_inst_obj
*instance_obj
;
636 instance_attr
= to_instance_attr(attr
);
637 instance_obj
= to_inst_obj(kobj
);
639 if (!instance_attr
->store
)
642 return instance_attr
->store(instance_obj
, instance_attr
, buf
, len
);
645 static const struct sysfs_ops most_inst_sysfs_ops
= {
646 .show
= instance_attr_show
,
647 .store
= instance_attr_store
,
651 * most_inst_release - release function for instance object
652 * @kobj: pointer to instance's kobject
654 * This frees the allocated memory for the instance object
656 static void most_inst_release(struct kobject
*kobj
)
658 struct most_inst_obj
*inst
= to_inst_obj(kobj
);
663 static ssize_t
show_description(struct most_inst_obj
*instance_obj
,
664 struct most_inst_attribute
*attr
,
667 return snprintf(buf
, PAGE_SIZE
, "%s\n",
668 instance_obj
->iface
->description
);
671 static ssize_t
show_interface(struct most_inst_obj
*instance_obj
,
672 struct most_inst_attribute
*attr
,
675 switch (instance_obj
->iface
->interface
) {
677 return snprintf(buf
, PAGE_SIZE
, "loopback\n");
679 return snprintf(buf
, PAGE_SIZE
, "i2c\n");
681 return snprintf(buf
, PAGE_SIZE
, "i2s\n");
683 return snprintf(buf
, PAGE_SIZE
, "tsi\n");
685 return snprintf(buf
, PAGE_SIZE
, "hbi\n");
686 case ITYPE_MEDIALB_DIM
:
687 return snprintf(buf
, PAGE_SIZE
, "mlb_dim\n");
688 case ITYPE_MEDIALB_DIM2
:
689 return snprintf(buf
, PAGE_SIZE
, "mlb_dim2\n");
691 return snprintf(buf
, PAGE_SIZE
, "usb\n");
693 return snprintf(buf
, PAGE_SIZE
, "pcie\n");
695 return snprintf(buf
, PAGE_SIZE
, "unknown\n");
698 #define create_inst_attribute(value) \
699 static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL)
701 create_inst_attribute(description
);
702 create_inst_attribute(interface
);
704 static struct attribute
*most_inst_def_attrs
[] = {
705 &most_inst_attr_description
.attr
,
706 &most_inst_attr_interface
.attr
,
710 static struct kobj_type most_inst_ktype
= {
711 .sysfs_ops
= &most_inst_sysfs_ops
,
712 .release
= most_inst_release
,
713 .default_attrs
= most_inst_def_attrs
,
/* kset grouping all instance kobjects */
static struct kset *most_inst_kset;
720 * create_most_inst_obj - creates an instance object
721 * @name: name of the object to be created
723 * This allocates memory for an instance structure, assigns the proper kset
724 * and registers it with sysfs.
726 * Returns a pointer to the instance object or NULL when something went wrong.
728 static struct most_inst_obj
*create_most_inst_obj(const char *name
)
730 struct most_inst_obj
*inst
;
733 inst
= kzalloc(sizeof(*inst
), GFP_KERNEL
);
736 inst
->kobj
.kset
= most_inst_kset
;
737 retval
= kobject_init_and_add(&inst
->kobj
, &most_inst_ktype
, NULL
,
740 kobject_put(&inst
->kobj
);
743 kobject_uevent(&inst
->kobj
, KOBJ_ADD
);
748 * destroy_most_inst_obj - MOST instance release function
749 * @inst: pointer to the instance object
751 * This decrements the reference counter of the instance object.
752 * If the reference count turns zero, its release function is called
754 static void destroy_most_inst_obj(struct most_inst_obj
*inst
)
756 struct most_c_obj
*c
, *tmp
;
758 /* need to destroy channels first, since
759 * each channel incremented the
760 * reference count of the inst->kobj
762 list_for_each_entry_safe(c
, tmp
, &inst
->channel_list
, list
) {
763 destroy_most_c_obj(c
);
765 kobject_put(&inst
->kobj
);
771 struct most_aim_obj
{
773 struct list_head list
;
774 struct most_aim
*driver
;
775 char add_link
[STRING_SIZE
];
776 char remove_link
[STRING_SIZE
];
778 #define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
780 static struct list_head aim_list
;
784 * struct most_aim_attribute - to access the attributes of AIM object
785 * @attr: attributes of an AIM
786 * @show: pointer to the show function
787 * @store: pointer to the store function
789 struct most_aim_attribute
{
790 struct attribute attr
;
791 ssize_t (*show
)(struct most_aim_obj
*d
,
792 struct most_aim_attribute
*attr
,
794 ssize_t (*store
)(struct most_aim_obj
*d
,
795 struct most_aim_attribute
*attr
,
799 #define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)
802 * aim_attr_show - show function of an AIM object
803 * @kobj: pointer to kobject
804 * @attr: pointer to attribute struct
807 static ssize_t
aim_attr_show(struct kobject
*kobj
,
808 struct attribute
*attr
,
811 struct most_aim_attribute
*aim_attr
;
812 struct most_aim_obj
*aim_obj
;
814 aim_attr
= to_aim_attr(attr
);
815 aim_obj
= to_aim_obj(kobj
);
820 return aim_attr
->show(aim_obj
, aim_attr
, buf
);
824 * aim_attr_store - store function of an AIM object
825 * @kobj: pointer to kobject
826 * @attr: pointer to attribute struct
828 * @len: length of buffer
830 static ssize_t
aim_attr_store(struct kobject
*kobj
,
831 struct attribute
*attr
,
835 struct most_aim_attribute
*aim_attr
;
836 struct most_aim_obj
*aim_obj
;
838 aim_attr
= to_aim_attr(attr
);
839 aim_obj
= to_aim_obj(kobj
);
841 if (!aim_attr
->store
)
843 return aim_attr
->store(aim_obj
, aim_attr
, buf
, len
);
846 static const struct sysfs_ops most_aim_sysfs_ops
= {
847 .show
= aim_attr_show
,
848 .store
= aim_attr_store
,
852 * most_aim_release - AIM release function
853 * @kobj: pointer to AIM's kobject
855 static void most_aim_release(struct kobject
*kobj
)
857 struct most_aim_obj
*aim_obj
= to_aim_obj(kobj
);
862 static ssize_t
show_add_link(struct most_aim_obj
*aim_obj
,
863 struct most_aim_attribute
*attr
,
866 return snprintf(buf
, PAGE_SIZE
, "%s\n", aim_obj
->add_link
);
870 * split_string - parses and changes string in the buffer buf and
871 * splits it into two mandatory and one optional substrings.
873 * @buf: complete string from attribute 'add_channel'
874 * @a: address of pointer to 1st substring (=instance name)
875 * @b: address of pointer to 2nd substring (=channel name)
876 * @c: optional address of pointer to 3rd substring (=user defined name)
880 * Input: "mdev0:ch0@ep_81:my_channel\n" or
881 * "mdev0:ch0@ep_81:my_channel"
883 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel"
885 * Input: "mdev0:ch0@ep_81\n"
886 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> ""
888 * Input: "mdev0:ch0@ep_81"
889 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL
891 static int split_string(char *buf
, char **a
, char **b
, char **c
)
893 *a
= strsep(&buf
, ":");
897 *b
= strsep(&buf
, ":\n");
902 *c
= strsep(&buf
, ":\n");
908 * get_channel_by_name - get pointer to channel object
909 * @mdev: name of the device instance
910 * @mdev_ch: name of the respective channel
912 * This retrieves the pointer to a channel object.
915 most_c_obj
*get_channel_by_name(char *mdev
, char *mdev_ch
)
917 struct most_c_obj
*c
, *tmp
;
918 struct most_inst_obj
*i
, *i_tmp
;
921 list_for_each_entry_safe(i
, i_tmp
, &instance_list
, list
) {
922 if (!strcmp(kobject_name(&i
->kobj
), mdev
)) {
927 if (unlikely(!found
))
928 return ERR_PTR(-EIO
);
930 list_for_each_entry_safe(c
, tmp
, &i
->channel_list
, list
) {
931 if (!strcmp(kobject_name(&c
->kobj
), mdev_ch
)) {
936 if (unlikely(2 > found
))
937 return ERR_PTR(-EIO
);
942 * store_add_link - store() function for add_link attribute
943 * @aim_obj: pointer to AIM object
944 * @attr: its attributes
946 * @len: buffer length
948 * This parses the string given by buf and splits it into
949 * three substrings. Note: third substring is optional. In case a cdev
950 * AIM is loaded the optional 3rd substring will make up the name of
951 * device node in the /dev directory. If omitted, the device node will
952 * inherit the channel's name within sysfs.
954 * Searches for a pair of device and channel and probes the AIM
957 * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link
958 * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link
960 * (1) would create the device node /dev/my_rxchannel
961 * (2) would create the device node /dev/mdev0-ch0@ep_81
963 static ssize_t
store_add_link(struct most_aim_obj
*aim_obj
,
964 struct most_aim_attribute
*attr
,
968 struct most_c_obj
*c
;
969 struct most_aim
**aim_ptr
;
970 char buffer
[STRING_SIZE
];
974 char devnod_buf
[STRING_SIZE
];
976 size_t max_len
= min_t(size_t, len
+ 1, STRING_SIZE
);
978 strlcpy(buffer
, buf
, max_len
);
979 strlcpy(aim_obj
->add_link
, buf
, max_len
);
981 ret
= split_string(buffer
, &mdev
, &mdev_ch
, &mdev_devnod
);
985 if (!mdev_devnod
|| *mdev_devnod
== 0) {
986 snprintf(devnod_buf
, sizeof(devnod_buf
), "%s-%s", mdev
, mdev_ch
);
987 mdev_devnod
= devnod_buf
;
990 c
= get_channel_by_name(mdev
, mdev_ch
);
995 aim_ptr
= &c
->first_aim
;
996 else if (!c
->second_aim
)
997 aim_ptr
= &c
->second_aim
;
1001 ret
= aim_obj
->driver
->probe_channel(c
->iface
, c
->channel_id
,
1002 &c
->cfg
, &c
->kobj
, mdev_devnod
);
1005 *aim_ptr
= aim_obj
->driver
;
1009 static struct most_aim_attribute most_aim_attr_add_link
=
1010 __ATTR(add_link
, S_IRUGO
| S_IWUSR
, show_add_link
, store_add_link
);
1012 static ssize_t
show_remove_link(struct most_aim_obj
*aim_obj
,
1013 struct most_aim_attribute
*attr
,
1016 return snprintf(buf
, PAGE_SIZE
, "%s\n", aim_obj
->remove_link
);
1020 * store_remove_link - store function for remove_link attribute
1021 * @aim_obj: pointer to AIM object
1022 * @attr: its attributes
1024 * @len: buffer length
1027 * echo -n -e "mdev0:ch0@ep_81\n" >remove_link
1029 static ssize_t
store_remove_link(struct most_aim_obj
*aim_obj
,
1030 struct most_aim_attribute
*attr
,
1034 struct most_c_obj
*c
;
1035 char buffer
[STRING_SIZE
];
1039 size_t max_len
= min_t(size_t, len
+ 1, STRING_SIZE
);
1041 strlcpy(buffer
, buf
, max_len
);
1042 strlcpy(aim_obj
->remove_link
, buf
, max_len
);
1043 ret
= split_string(buffer
, &mdev
, &mdev_ch
, NULL
);
1047 c
= get_channel_by_name(mdev
, mdev_ch
);
1051 if (c
->first_aim
== aim_obj
->driver
)
1052 c
->first_aim
= NULL
;
1053 if (c
->second_aim
== aim_obj
->driver
)
1054 c
->second_aim
= NULL
;
1055 if (aim_obj
->driver
->disconnect_channel(c
->iface
, c
->channel_id
))
1060 static struct most_aim_attribute most_aim_attr_remove_link
=
1061 __ATTR(remove_link
, S_IRUGO
| S_IWUSR
, show_remove_link
, store_remove_link
);
1063 static struct attribute
*most_aim_def_attrs
[] = {
1064 &most_aim_attr_add_link
.attr
,
1065 &most_aim_attr_remove_link
.attr
,
1069 static struct kobj_type most_aim_ktype
= {
1070 .sysfs_ops
= &most_aim_sysfs_ops
,
1071 .release
= most_aim_release
,
1072 .default_attrs
= most_aim_def_attrs
,
/* kset grouping all AIM kobjects */
static struct kset *most_aim_kset;
1078 * create_most_aim_obj - creates an AIM object
1079 * @name: name of the AIM
1081 * This creates an AIM object assigns the proper kset and registers
1083 * Returns a pointer to the object or NULL if something went wrong.
1085 static struct most_aim_obj
*create_most_aim_obj(const char *name
)
1087 struct most_aim_obj
*most_aim
;
1090 most_aim
= kzalloc(sizeof(*most_aim
), GFP_KERNEL
);
1093 most_aim
->kobj
.kset
= most_aim_kset
;
1094 retval
= kobject_init_and_add(&most_aim
->kobj
, &most_aim_ktype
,
1097 kobject_put(&most_aim
->kobj
);
1100 kobject_uevent(&most_aim
->kobj
, KOBJ_ADD
);
1105 * destroy_most_aim_obj - AIM release function
1106 * @p: pointer to AIM object
1108 * This decrements the reference counter of the AIM object. If the
1109 * reference count turns zero, its release function will be called.
1111 static void destroy_most_aim_obj(struct most_aim_obj
*p
)
1113 kobject_put(&p
->kobj
);
1122 * Instantiation of the MOST bus
1124 static struct bus_type most_bus
= {
1129 * Instantiation of the core driver
1131 static struct device_driver mostcore
= {
1136 static inline void trash_mbo(struct mbo
*mbo
)
1138 unsigned long flags
;
1139 struct most_c_obj
*c
= mbo
->context
;
1141 spin_lock_irqsave(&c
->fifo_lock
, flags
);
1142 list_add(&mbo
->list
, &c
->trash_fifo
);
1143 spin_unlock_irqrestore(&c
->fifo_lock
, flags
);
1146 static struct mbo
*get_hdm_mbo(struct most_c_obj
*c
)
1148 unsigned long flags
;
1151 spin_lock_irqsave(&c
->fifo_lock
, flags
);
1152 if (c
->enqueue_halt
|| list_empty(&c
->halt_fifo
))
1155 mbo
= list_pop_mbo(&c
->halt_fifo
);
1156 spin_unlock_irqrestore(&c
->fifo_lock
, flags
);
1160 static void nq_hdm_mbo(struct mbo
*mbo
)
1162 unsigned long flags
;
1163 struct most_c_obj
*c
= mbo
->context
;
1165 spin_lock_irqsave(&c
->fifo_lock
, flags
);
1166 list_add_tail(&mbo
->list
, &c
->halt_fifo
);
1167 spin_unlock_irqrestore(&c
->fifo_lock
, flags
);
1168 wake_up_interruptible(&c
->hdm_fifo_wq
);
1171 static int hdm_enqueue_thread(void *data
)
1173 struct most_c_obj
*c
= data
;
1175 typeof(c
->iface
->enqueue
) enqueue
= c
->iface
->enqueue
;
1177 while (likely(!kthread_should_stop())) {
1178 wait_event_interruptible(c
->hdm_fifo_wq
,
1179 (mbo
= get_hdm_mbo(c
))
1180 || kthread_should_stop());
1185 if (c
->cfg
.direction
== MOST_CH_RX
)
1186 mbo
->buffer_length
= c
->cfg
.buffer_size
;
1188 if (unlikely(enqueue(mbo
->ifp
, mbo
->hdm_channel_id
, mbo
))) {
1189 pr_err("hdm enqueue failed\n");
1191 c
->hdm_enqueue_task
= NULL
;
1199 static int run_enqueue_thread(struct most_c_obj
*c
, int channel_id
)
1201 struct task_struct
*task
=
1202 kthread_run(&hdm_enqueue_thread
, c
, "hdm_fifo_%d", channel_id
);
1205 return PTR_ERR(task
);
1207 c
->hdm_enqueue_task
= task
;
1212 * arm_mbo - recycle MBO for further usage
1213 * @mbo: buffer object
1215 * This puts an MBO back to the list to have it ready for up coming
1218 * In case the MBO belongs to a channel that recently has been
1219 * poisoned, the MBO is scheduled to be trashed.
1220 * Calls the completion handler of an attached AIM.
1222 static void arm_mbo(struct mbo
*mbo
)
1224 unsigned long flags
;
1225 struct most_c_obj
*c
;
1227 BUG_ON((!mbo
) || (!mbo
->context
));
1230 if (c
->is_poisoned
) {
1235 spin_lock_irqsave(&c
->fifo_lock
, flags
);
1236 list_add_tail(&mbo
->list
, &c
->fifo
);
1237 spin_unlock_irqrestore(&c
->fifo_lock
, flags
);
1239 if (c
->second_aim
&& c
->second_aim
->tx_completion
)
1240 c
->second_aim
->tx_completion(c
->iface
, c
->channel_id
);
1241 if (c
->first_aim
&& c
->first_aim
->tx_completion
)
1242 c
->first_aim
->tx_completion(c
->iface
, c
->channel_id
);
1246 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
1247 * @c: pointer to interface channel
1248 * @dir: direction of the channel
1249 * @compl: pointer to completion function
1251 * This allocates buffer objects including the containing DMA coherent
1252 * buffer and puts them in the fifo.
1253 * Buffers of Rx channels are put in the kthread fifo, hence immediately
1254 * submitted to the HDM.
1256 * Returns the number of allocated and enqueued MBOs.
1258 static int arm_mbo_chain(struct most_c_obj
*c
, int dir
,
1259 void (*compl)(struct mbo
*))
1264 u32 coherent_buf_size
= c
->cfg
.buffer_size
+ c
->cfg
.extra_len
;
1266 atomic_set(&c
->mbo_nq_level
, 0);
1268 for (i
= 0; i
< c
->cfg
.num_buffers
; i
++) {
1269 mbo
= kzalloc(sizeof(*mbo
), GFP_KERNEL
);
1271 pr_info("WARN: Allocation of MBO failed.\n");
1276 mbo
->ifp
= c
->iface
;
1277 mbo
->hdm_channel_id
= c
->channel_id
;
1278 mbo
->virt_address
= dma_alloc_coherent(NULL
,
1282 if (!mbo
->virt_address
) {
1283 pr_info("WARN: No DMA coherent buffer.\n");
1287 mbo
->complete
= compl;
1288 if (dir
== MOST_CH_RX
) {
1290 atomic_inc(&c
->mbo_nq_level
);
1304 * most_submit_mbo - submits an MBO to fifo
1305 * @mbo: pointer to the MBO
1308 int most_submit_mbo(struct mbo
*mbo
)
1310 struct most_c_obj
*c
;
1311 struct most_inst_obj
*i
;
1313 if (unlikely((!mbo
) || (!mbo
->context
))) {
1314 pr_err("Bad MBO or missing channel reference\n");
1320 if (unlikely(atomic_read(&i
->tainted
)))
1326 EXPORT_SYMBOL_GPL(most_submit_mbo
);
1329 * most_write_completion - write completion handler
1330 * @mbo: pointer to MBO
1332 * This recycles the MBO for further usage. In case the channel has been
1333 * poisoned, the MBO is scheduled to be trashed.
1335 static void most_write_completion(struct mbo
*mbo
)
1337 struct most_c_obj
*c
;
1339 BUG_ON((!mbo
) || (!mbo
->context
));
1342 if (mbo
->status
== MBO_E_INVAL
)
1343 pr_info("WARN: Tx MBO status: invalid\n");
1344 if (unlikely((c
->is_poisoned
== true) || (mbo
->status
== MBO_E_CLOSE
)))
/*
 * NOTE(review): line-mangled excerpt; code bytes untouched, comments
 * only. The function's storage-class/return-type line is cut off above
 * "most_c_obj".
 *
 * get_channel_by_iface() - look up a channel object by interface and id.
 * Visible behavior: validates the interface pointer and the id range
 * against iface->num_channels, reports an unregistered interface, and on
 * success returns the instance's channel[id] slot. The error-path return
 * statements are not visible here, but callers elsewhere in this file
 * treat a failed lookup as NULL.
 */
1351 * get_channel_by_iface - get pointer to channel object
1352 * @iface: pointer to interface instance
1355 * This retrieves a pointer to a channel of the given interface and channel ID.
1358 most_c_obj
*get_channel_by_iface(struct most_interface
*iface
, int id
)
1360 struct most_inst_obj
*i
;
1362 if (unlikely(!iface
)) {
1363 pr_err("Bad interface\n");
1366 if (unlikely((id
< 0) || (id
>= iface
->num_channels
))) {
1367 pr_err("Channel index (%d) out of range\n", id
);
1372 pr_err("interface is not registered\n");
1375 return i
->channel
[id
];
/*
 * NOTE(review): line-mangled excerpt with statements missing from this
 * view; code bytes untouched, comments only.
 *
 * most_get_mbo() - pop a free MBO from the channel fifo. Visible
 * behavior: resolves the channel via get_channel_by_iface(), then under
 * fifo_lock (irqsave) checks for an empty fifo (unlock path visible;
 * the NULL-return itself is not), pops the first MBO, and presets its
 * buffer_length to the channel's configured buffer_size.
 */
1379 * most_get_mbo - get pointer to an MBO of pool
1380 * @iface: pointer to interface instance
1383 * This attempts to get a free buffer out of the channel fifo.
1384 * Returns a pointer to MBO on success or NULL otherwise.
1386 struct mbo
*most_get_mbo(struct most_interface
*iface
, int id
)
1389 struct most_c_obj
*c
;
1390 unsigned long flags
;
1392 c
= get_channel_by_iface(iface
, id
);
/* fifo is shared with completion paths — irqsave spinlock required. */
1395 spin_lock_irqsave(&c
->fifo_lock
, flags
);
1396 if (list_empty(&c
->fifo
)) {
1397 spin_unlock_irqrestore(&c
->fifo_lock
, flags
);
1400 mbo
= list_pop_mbo(&c
->fifo
);
1401 spin_unlock_irqrestore(&c
->fifo_lock
, flags
);
1402 mbo
->buffer_length
= c
->cfg
.buffer_size
;
1405 EXPORT_SYMBOL_GPL(most_get_mbo
);
/*
 * NOTE(review): line-mangled excerpt with statements missing from this
 * view; code bytes untouched, comments only.
 *
 * most_put_mbo() - return a buffer object to the pool. Visible behavior:
 * if the owning instance is tainted, the MBO is marked MBO_E_CLOSE
 * (presumably so it gets trashed — the following statements are not
 * visible). TX channels take a distinct branch; otherwise the channel's
 * mbo_nq_level is incremented, matching the RX accounting in
 * arm_mbo_chain()/most_read_completion().
 */
1409 * most_put_mbo - return buffer to pool
1410 * @mbo: buffer object
1412 void most_put_mbo(struct mbo
*mbo
)
1414 struct most_c_obj
*c
;
1415 struct most_inst_obj
*i
;
1420 if (unlikely(atomic_read(&i
->tainted
))) {
1421 mbo
->status
= MBO_E_CLOSE
;
1425 if (c
->cfg
.direction
== MOST_CH_TX
) {
1430 atomic_inc(&c
->mbo_nq_level
);
1432 EXPORT_SYMBOL_GPL(most_put_mbo
);
/*
 * NOTE(review): line-mangled excerpt with statements missing from this
 * view; code bytes untouched, comments only.
 *
 * most_read_completion() - Rx completion handler. Visible behavior:
 * poisoned channels / MBO_E_CLOSE take an early path; MBO_E_INVAL
 * re-credits mbo_nq_level; a successful receive decrements mbo_nq_level
 * and warns when the device runs out of Rx buffers. The MBO is then
 * offered to first_aim and second_aim in order — the first AIM whose
 * rx_completion() returns 0 consumes it; with no AIM linked the MBO is
 * marked MBO_E_CLOSE.
 */
1435 * most_read_completion - read completion handler
1436 * @mbo: pointer to MBO
1438 * This function is called by the HDM when data has been received from the
1439 * hardware and copied to the buffer of the MBO.
1441 * In case the channel has been poisoned it puts the buffer in the trash queue.
1442 * Otherwise, it passes the buffer to an AIM for further processing.
1444 static void most_read_completion(struct mbo
*mbo
)
1446 struct most_c_obj
*c
;
1449 if (unlikely((c
->is_poisoned
== true) || (mbo
->status
== MBO_E_CLOSE
)))
1452 if (mbo
->status
== MBO_E_INVAL
) {
1454 atomic_inc(&c
->mbo_nq_level
);
/* Reached zero queued Rx buffers — device starves until a put_mbo. */
1458 if (atomic_sub_and_test(1, &c
->mbo_nq_level
)) {
1459 pr_info("WARN: rx device out of buffers\n");
1463 if (c
->first_aim
&& c
->first_aim
->rx_completion
&&
1464 c
->first_aim
->rx_completion(mbo
) == 0)
1466 if (c
->second_aim
&& c
->second_aim
->rx_completion
&&
1467 c
->second_aim
->rx_completion(mbo
) == 0)
1469 pr_info("WARN: no driver linked with this channel\n");
1470 mbo
->status
= MBO_E_CLOSE
;
/*
 * NOTE(review): line-mangled excerpt with statements missing from this
 * view; code bytes untouched, comments only.
 *
 * most_start_channel() - prepare a channel for communication. Visible
 * behavior: pins the HDM module via try_module_get(), configures the
 * channel through the interface's configure() callback, initializes the
 * HDM fifo waitqueue, arms the MBO chain with the direction-appropriate
 * completion handler, starts the enqueue thread, marks the channel
 * started and records the MBO refcount. module_put() appears on the
 * visible error/unwind path, balancing the try_module_get().
 */
1476 * most_start_channel - prepares a channel for communication
1477 * @iface: pointer to interface instance
1480 * This prepares the channel for usage. Cross-checks whether the
1481 * channel's been properly configured.
1483 * Returns 0 on success or error code otherwise.
1485 int most_start_channel(struct most_interface
*iface
, int id
)
1489 struct most_c_obj
*c
= get_channel_by_iface(iface
, id
);
1497 if (!try_module_get(iface
->mod
)) {
1498 pr_info("failed to acquire HDM lock\n");
/* extra_len may be adjusted by configure(); reset before the call. */
1503 c
->cfg
.extra_len
= 0;
1504 if (c
->iface
->configure(c
->iface
, c
->channel_id
, &c
->cfg
)) {
1505 pr_info("channel configuration failed. Go check settings...\n");
1510 init_waitqueue_head(&c
->hdm_fifo_wq
);
1512 if (c
->cfg
.direction
== MOST_CH_RX
)
1513 num_buffer
= arm_mbo_chain(c
, c
->cfg
.direction
,
1514 most_read_completion
);
1516 num_buffer
= arm_mbo_chain(c
, c
->cfg
.direction
,
1517 most_write_completion
);
1518 if (unlikely(0 == num_buffer
)) {
1519 pr_info("failed to allocate memory\n");
1524 ret
= run_enqueue_thread(c
, id
);
1528 c
->is_started
= true;
1530 atomic_set(&c
->mbo_ref
, num_buffer
);
/* Error unwind: release the module reference taken above. */
1534 module_put(iface
->mod
);
1538 EXPORT_SYMBOL_GPL(most_start_channel
);
/*
 * NOTE(review): line-mangled excerpt with statements missing from this
 * view; code bytes untouched, comments only.
 *
 * most_stop_channel() - stop a running channel. Visible behavior:
 * validates the interface/id, unlinks both AIMs (see FIXME below), stops
 * the enqueue kthread under stop_task_mutex, consults the instance
 * taint flag under deregister_mutex, conditionally drops the HDM module
 * reference, poisons the channel via the interface's poison_channel()
 * callback, flushes the trash and channel fifos, waits for cleanup
 * completion (interruptible variant under CMPL_INTERRUPTIBLE), and
 * finally clears the poisoned/started flags.
 */
1541 * most_stop_channel - stops a running channel
1542 * @iface: pointer to interface instance
1545 int most_stop_channel(struct most_interface
*iface
, int id
)
1547 struct most_c_obj
*c
;
1549 if (unlikely((!iface
) || (id
>= iface
->num_channels
) || (id
< 0))) {
1550 pr_err("Bad interface or index out of range\n");
1553 c
= get_channel_by_iface(iface
, id
);
1560 /* FIXME: we need to know calling AIM to reset only one link */
1561 c
->first_aim
= NULL
;
1562 c
->second_aim
= NULL
;
1563 /* do not go into recursion calling aim->disconnect_channel */
/* Serialize kthread teardown with most_deregister_interface(). */
1565 mutex_lock(&c
->stop_task_mutex
);
1566 if (c
->hdm_enqueue_task
)
1567 kthread_stop(c
->hdm_enqueue_task
);
1568 c
->hdm_enqueue_task
= NULL
;
1569 mutex_unlock(&c
->stop_task_mutex
);
1571 mutex_lock(&deregister_mutex
);
1572 if (atomic_read(&c
->inst
->tainted
)) {
1573 mutex_unlock(&deregister_mutex
);
1576 mutex_unlock(&deregister_mutex
);
/*
 * NOTE(review): 'modref' is not declared in this excerpt — presumably a
 * module-scope refcount guard; verify against the full file.
 */
1578 if (iface
->mod
&& modref
) {
1579 module_put(iface
->mod
);
1583 c
->is_poisoned
= true;
1584 if (c
->iface
->poison_channel(c
->iface
, c
->channel_id
)) {
1585 pr_err("Cannot stop channel %d of mdev %s\n", c
->channel_id
,
1586 c
->iface
->description
);
1589 flush_trash_fifo(c
);
1590 flush_channel_fifos(c
);
1592 #ifdef CMPL_INTERRUPTIBLE
1593 if (wait_for_completion_interruptible(&c
->cleanup
)) {
1594 pr_info("Interrupted while clean up ch %d\n", c
->channel_id
);
1598 wait_for_completion(&c
->cleanup
);
1600 c
->is_poisoned
= false;
1601 c
->is_started
= false;
1604 EXPORT_SYMBOL_GPL(most_stop_channel
);
/*
 * NOTE(review): line-mangled excerpt with statements missing from this
 * view; code bytes untouched, comments only.
 *
 * most_register_aim() - register an AIM (application interfacing
 * module) with the core. Visible behavior: rejects a bad driver,
 * creates an aim_obj keyed by the AIM's name, cross-links driver and
 * context pointers, logs the registration, and appends the object to
 * the global aim_list.
 */
1607 * most_register_aim - registers an AIM (driver) with the core
1608 * @aim: instance of AIM to be registered
1610 int most_register_aim(struct most_aim
*aim
)
1612 struct most_aim_obj
*aim_obj
;
1615 pr_err("Bad driver\n");
1618 aim_obj
= create_most_aim_obj(aim
->name
);
1620 pr_info("failed to alloc driver object\n");
1623 aim_obj
->driver
= aim
;
1624 aim
->context
= aim_obj
;
1625 pr_info("registered new application interfacing module %s\n",
1627 list_add_tail(&aim_obj
->list
, &aim_list
);
1630 EXPORT_SYMBOL_GPL(most_register_aim
);
/*
 * NOTE(review): line-mangled excerpt with statements missing from this
 * view; code bytes untouched, comments only.
 *
 * most_deregister_aim() - remove an AIM from the core. Visible
 * behavior: validates the driver, recovers the aim_obj from
 * aim->context, walks every instance's channel list disconnecting and
 * unlinking this AIM wherever it is first_aim or second_aim, removes
 * the aim_obj from aim_list, destroys it, and logs the deregistration.
 */
1633 * most_deregister_aim - deregisters an AIM (driver) with the core
1634 * @aim: AIM to be removed
1636 int most_deregister_aim(struct most_aim
*aim
)
1638 struct most_aim_obj
*aim_obj
;
1639 struct most_c_obj
*c
, *tmp
;
1640 struct most_inst_obj
*i
, *i_tmp
;
1643 pr_err("Bad driver\n");
1647 aim_obj
= aim
->context
;
1649 pr_info("driver not registered.\n");
/* Detach this AIM from every channel of every registered instance. */
1652 list_for_each_entry_safe(i
, i_tmp
, &instance_list
, list
) {
1653 list_for_each_entry_safe(c
, tmp
, &i
->channel_list
, list
) {
1654 if (c
->first_aim
== aim
|| c
->second_aim
== aim
)
1655 aim
->disconnect_channel(
1656 c
->iface
, c
->channel_id
);
1657 if (c
->first_aim
== aim
)
1658 c
->first_aim
= NULL
;
1659 if (c
->second_aim
== aim
)
1660 c
->second_aim
= NULL
;
1663 list_del(&aim_obj
->list
);
1664 destroy_most_aim_obj(aim_obj
);
1665 pr_info("deregistering application interfacing module %s\n", aim
->name
);
1668 EXPORT_SYMBOL_GPL(most_deregister_aim
);
/*
 * NOTE(review): line-mangled excerpt with statements missing from this
 * view; code bytes untouched, comments only.
 *
 * most_register_interface() - register an HDM interface with the core.
 * Visible behavior: validates the mandatory callbacks and channel count,
 * allocates an mdev id via ida_simple_get(), creates the instance
 * object "mdev%d", links it into instance_list, then creates and
 * zero-initializes one channel object per channel (name derived from
 * the channel vector's name_suffix), initializing its lock, fifos,
 * cleanup completion, refcount and mutex. Error paths return
 * ERR_PTR(-EINVAL/-ENOMEM); the final unwind deletes and destroys the
 * instance.
 */
1671 * most_register_interface - registers an interface with core
1672 * @iface: pointer to the instance of the interface description.
1674 * Allocates and initializes a new interface instance and all of its channels.
1675 * Returns a pointer to kobject or an error pointer.
1677 struct kobject
*most_register_interface(struct most_interface
*iface
)
1681 char name
[STRING_SIZE
];
1682 char channel_name
[STRING_SIZE
];
1683 struct most_c_obj
*c
;
1684 struct most_inst_obj
*inst
;
1686 if (!iface
|| !iface
->enqueue
|| !iface
->configure
||
1687 !iface
->poison_channel
|| (iface
->num_channels
> MAX_CHANNELS
)) {
1688 pr_err("Bad interface or channel overflow\n");
1689 return ERR_PTR(-EINVAL
);
1692 id
= ida_simple_get(&mdev_id
, 0, 0, GFP_KERNEL
);
1694 pr_info("Failed to alloc mdev ID\n");
1697 snprintf(name
, STRING_SIZE
, "mdev%d", id
);
1699 inst
= create_most_inst_obj(name
);
1701 pr_info("Failed to allocate interface instance\n");
1702 return ERR_PTR(-ENOMEM
);
/*
 * NOTE(review): this early -ENOMEM return does not visibly release the
 * ida id allocated above — check the full file for a leak.
 */
1706 INIT_LIST_HEAD(&inst
->channel_list
);
1707 inst
->iface
= iface
;
1709 atomic_set(&inst
->tainted
, 0);
1710 list_add_tail(&inst
->list
, &instance_list
);
1712 for (i
= 0; i
< iface
->num_channels
; i
++) {
1713 const char *name_suffix
= iface
->channel_vector
[i
].name_suffix
;
/* No suffix -> "ch%d"; "@..." -> "ch%d%s"; otherwise suffix verbatim. */
1716 snprintf(channel_name
, STRING_SIZE
, "ch%d", i
);
1717 else if (name_suffix
[0] == '@')
1718 snprintf(channel_name
, STRING_SIZE
, "ch%d%s", i
,
1721 snprintf(channel_name
, STRING_SIZE
, "%s", name_suffix
);
1723 /* this increments the reference count of this instance */
1724 c
= create_most_c_obj(channel_name
, &inst
->kobj
);
1727 inst
->channel
[i
] = c
;
1732 c
->keep_mbo
= false;
1733 c
->enqueue_halt
= false;
1734 c
->is_poisoned
= false;
1735 c
->is_started
= false;
1736 c
->cfg
.direction
= 0;
1737 c
->cfg
.data_type
= 0;
1738 c
->cfg
.num_buffers
= 0;
1739 c
->cfg
.buffer_size
= 0;
1740 c
->cfg
.subbuffer_size
= 0;
1741 c
->cfg
.packets_per_xact
= 0;
1742 spin_lock_init(&c
->fifo_lock
);
1743 INIT_LIST_HEAD(&c
->fifo
);
1744 INIT_LIST_HEAD(&c
->trash_fifo
);
1745 INIT_LIST_HEAD(&c
->halt_fifo
);
1746 init_completion(&c
->cleanup
);
1747 atomic_set(&c
->mbo_ref
, 0);
1748 mutex_init(&c
->stop_task_mutex
);
1749 list_add_tail(&c
->list
, &inst
->channel_list
);
1751 pr_info("registered new MOST device mdev%d (%s)\n",
1752 inst
->dev_id
, iface
->description
);
/* Channel-allocation failure unwind. */
1756 pr_info("Failed allocate channel(s)\n");
1757 list_del(&inst
->list
);
1758 destroy_most_inst_obj(inst
);
1759 return ERR_PTR(-ENOMEM
);
1761 EXPORT_SYMBOL_GPL(most_register_interface
);
/*
 * NOTE(review): line-mangled excerpt with statements missing from this
 * view; code bytes untouched, comments only.
 *
 * most_deregister_interface() - tear down a registered interface.
 * Visible behavior: under deregister_mutex the instance (from
 * iface->priv) is validated and marked tainted; the HDM module ref is
 * dropped when held; every channel's enqueue kthread is stopped under
 * stop_task_mutex and the channel is poisoned via poison_channel();
 * finally the mdev id is released and the instance object destroyed.
 */
1764 * most_deregister_interface - deregisters an interface with core
1765 * @iface: pointer to the interface instance description.
1767 * Before removing an interface instance from the list, all running
1768 * channels are stopped and poisoned.
1770 void most_deregister_interface(struct most_interface
*iface
)
1772 struct most_inst_obj
*i
= iface
->priv
;
1773 struct most_c_obj
*c
;
1775 mutex_lock(&deregister_mutex
);
1777 pr_info("Bad Interface\n");
1778 mutex_unlock(&deregister_mutex
);
1781 pr_info("deregistering MOST device %s (%s)\n", i
->kobj
.name
,
1782 iface
->description
);
/* Taint first so concurrent submit/put paths start refusing MBOs. */
1784 atomic_set(&i
->tainted
, 1);
1785 mutex_unlock(&deregister_mutex
);
1788 if (iface
->mod
&& modref
)
1789 module_put(iface
->mod
);
1793 list_for_each_entry(c
, &i
->channel_list
, list
) {
1797 mutex_lock(&c
->stop_task_mutex
);
1798 if (c
->hdm_enqueue_task
)
1799 kthread_stop(c
->hdm_enqueue_task
);
1800 c
->hdm_enqueue_task
= NULL
;
1801 mutex_unlock(&c
->stop_task_mutex
);
1803 if (iface
->poison_channel(iface
, c
->channel_id
))
1804 pr_err("Can't poison channel %d\n", c
->channel_id
);
1806 ida_simple_remove(&mdev_id
, i
->dev_id
);
1808 destroy_most_inst_obj(i
);
1810 EXPORT_SYMBOL_GPL(most_deregister_interface
);
1813 * most_stop_enqueue - prevents core from enqueueing MBOs
1814 * @iface: pointer to interface
1817 * This is called by an HDM that _cannot_ attend to its duties and
1818 * is imminent to get run over by the core. The core is not going to
1819 * enqueue any further packets unless the flagging HDM calls
1820 * most_resume enqueue().
1822 void most_stop_enqueue(struct most_interface
*iface
, int id
)
1824 struct most_c_obj
*c
= get_channel_by_iface(iface
, id
);
1827 c
->enqueue_halt
= true;
1829 EXPORT_SYMBOL_GPL(most_stop_enqueue
);
1832 * most_resume_enqueue - allow core to enqueue MBOs again
1833 * @iface: pointer to interface
1836 * This clears the enqueue halt flag and enqueues all MBOs currently
1837 * sitting in the wait fifo.
1839 void most_resume_enqueue(struct most_interface
*iface
, int id
)
1841 struct most_c_obj
*c
= get_channel_by_iface(iface
, id
);
1845 c
->enqueue_halt
= false;
1847 wake_up_interruptible(&c
->hdm_fifo_wq
);
1849 EXPORT_SYMBOL_GPL(most_resume_enqueue
);
/*
 * NOTE(review): line-mangled excerpt with statements missing from this
 * view; code bytes untouched, comments only.
 *
 * most_init() - module initialization. Visible behavior: initializes the
 * global instance/AIM lists and deregister_mutex, registers the most
 * bus and class, registers the core driver, creates the "mostcore" glue
 * device, and creates the "aims" and "devices" ksets under it. Error
 * labels unwind in reverse order (kset, device, driver, class, bus).
 */
1851 static int __init
most_init(void)
1853 pr_info("init()\n");
1854 INIT_LIST_HEAD(&instance_list
);
1855 INIT_LIST_HEAD(&aim_list
);
1856 mutex_init(&deregister_mutex
);
1859 if (bus_register(&most_bus
)) {
1860 pr_info("Cannot register most bus\n");
1864 most_class
= class_create(THIS_MODULE
, "most");
1865 if (IS_ERR(most_class
)) {
1866 pr_info("No udev support.\n");
1869 if (driver_register(&mostcore
)) {
1870 pr_info("Cannot register core driver\n");
/*
 * NOTE(review): the assignment of device_create()'s result to
 * class_glue_dir is not visible in this excerpt; the following NULL
 * check implies it — confirm against the full file (device_create()
 * returns ERR_PTR on failure, not NULL).
 */
1875 device_create(most_class
, NULL
, 0, NULL
, "mostcore");
1876 if (!class_glue_dir
)
1880 kset_create_and_add("aims", NULL
, &class_glue_dir
->kobj
);
1882 goto exit_class_container
;
1885 kset_create_and_add("devices", NULL
, &class_glue_dir
->kobj
);
1886 if (!most_inst_kset
)
1887 goto exit_driver_kset
;
/* Unwind path: release resources in reverse order of acquisition. */
1892 kset_unregister(most_aim_kset
);
1893 exit_class_container
:
1894 device_destroy(most_class
, 0);
1896 driver_unregister(&mostcore
);
1898 class_destroy(most_class
);
1900 bus_unregister(&most_bus
);
/*
 * NOTE(review): line-mangled excerpt with statements missing from this
 * view; code bytes untouched, comments only.
 *
 * most_exit() - module teardown. Visible behavior: destroys all
 * registered AIM objects and interface instances, then unregisters the
 * ksets, glue device, core driver, class and bus (reverse of
 * most_init()), and finally destroys the mdev id allocator.
 */
1905 static void __exit
most_exit(void)
1907 struct most_inst_obj
*i
, *i_tmp
;
1908 struct most_aim_obj
*d
, *d_tmp
;
1910 pr_info("exit core module\n");
1911 list_for_each_entry_safe(d
, d_tmp
, &aim_list
, list
) {
1912 destroy_most_aim_obj(d
);
1915 list_for_each_entry_safe(i
, i_tmp
, &instance_list
, list
) {
1917 destroy_most_inst_obj(i
);
1919 kset_unregister(most_inst_kset
);
1920 kset_unregister(most_aim_kset
);
1921 device_destroy(most_class
, 0);
1922 driver_unregister(&mostcore
);
1923 class_destroy(most_class
);
1924 bus_unregister(&most_bus
);
1925 ida_destroy(&mdev_id
);
/* Module entry/exit registration and metadata. */
1928 module_init(most_init
);
1929 module_exit(most_exit
);
1930 MODULE_LICENSE("GPL");
1931 MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
1932 MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");