/*
 * arch/sh/kernel/cpu/sh4/sq.c
 *
 * General management API for SH-4 integrated Store Queues
 *
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2001, 2002  M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/sysdev.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cpu/sq.h>

struct sq_mapping {
	const char *name;

	unsigned long sq_addr;
	unsigned long addr;
	unsigned int size;

	struct sq_mapping *next;
};

static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
static struct kmem_cache *sq_cache;
static unsigned long *sq_bitmap;

/*
 * Read back from the SQ area and then zero both queues, ensuring that
 * any outstanding store queue writes have drained before continuing.
 */
#define store_queue_barrier()			\
do {						\
	(void)ctrl_inl(P4SEG_STORE_QUE);	\
	ctrl_outl(0, P4SEG_STORE_QUE + 0);	\
	ctrl_outl(0, P4SEG_STORE_QUE + 8);	\
} while (0)

/**
 * sq_flush_range - Flush (prefetch) a specific SQ range
 * @start: the store queue address to start flushing from
 * @len: the length to flush
 *
 * Flushes the store queue cache from @start to @start + @len in a
 * linear fashion.
 */
void sq_flush_range(unsigned long start, unsigned int len)
{
	volatile unsigned long *sq = (unsigned long *)start;

	/* Flush the queues */
	for (len >>= 5; len--; sq += 8)
		prefetchw((void *)sq);

	/* Wait for completion */
	store_queue_barrier();
}
EXPORT_SYMBOL(sq_flush_range);
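
/*
 * Illustrative use (not part of the original file; sq_dest, src and n are
 * hypothetical): fill 32-byte SQ lines through a store queue mapping,
 * then flush them out to memory with sq_flush_range().
 *
 *	unsigned long *d = (unsigned long *)sq_dest;
 *	const unsigned long *s = src;
 *	unsigned int i;
 *
 *	for (i = 0; i < n; i += 8) {
 *		d[i + 0] = s[i + 0]; d[i + 1] = s[i + 1];
 *		d[i + 2] = s[i + 2]; d[i + 3] = s[i + 3];
 *		d[i + 4] = s[i + 4]; d[i + 5] = s[i + 5];
 *		d[i + 6] = s[i + 6]; d[i + 7] = s[i + 7];
 *	}
 *	sq_flush_range(sq_dest, n * sizeof(unsigned long));
 */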

static inline void sq_mapping_list_add(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	p = &sq_mapping_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	map->next = tmp;
	*p = map;

	spin_unlock_irq(&sq_mapping_lock);
}

static inline void sq_mapping_list_del(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
		if (tmp == map) {
			*p = tmp->next;
			break;
		}

	spin_unlock_irq(&sq_mapping_lock);
}

static int __sq_remap(struct sq_mapping *map, unsigned long flags)
{
#if defined(CONFIG_MMU)
	struct vm_struct *vma;

	/*
	 * With an MMU, carve out a vm area within the store queue address
	 * space and remap the physical pages into it.
	 */
	vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
	if (!vma)
		return -ENOMEM;

	vma->phys_addr = map->addr;

	if (remap_area_pages((unsigned long)vma->addr, vma->phys_addr,
			     map->size, flags)) {
		vunmap(vma->addr);
		return -EAGAIN;
	}
#else
	/*
	 * Without an MMU (or with it turned off), this is much more
	 * straightforward, as we can just load up each queue's QACR with
	 * the physical address appropriately masked.
	 */
	ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
	ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
#endif

	return 0;
}
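
/*
 * Worked example for the QACR math above (illustrative): for a physical
 * address of 0x10000000, ((0x10000000 >> 26) << 2) & 0x1c yields 0x10,
 * which loads 4 into the QACR AREA field (bits [4:2]) and steers SQ
 * writes at the 64MB region beginning at physical 0x10000000.
 */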

/**
 * sq_remap - Map a physical address through the Store Queues
 * @phys: Physical address of mapping.
 * @size: Length of mapping.
 * @name: User invoking mapping.
 * @flags: Protection flags.
 *
 * Remaps the physical address @phys through the next available store queue
 * address of @size length. @name is logged at boot time as well as through
 * the sysfs interface.
 */
unsigned long sq_remap(unsigned long phys, unsigned int size,
		       const char *name, unsigned long flags)
{
	struct sq_mapping *map;
	unsigned long end;
	unsigned int psz;
	int ret, page;

	/* Don't allow wraparound or zero size */
	end = phys + size - 1;
	if (unlikely(!size || end < phys))
		return -EINVAL;
	/* Don't allow anyone to remap normal memory.. */
	if (unlikely(phys < virt_to_phys(high_memory)))
		return -EINVAL;

	phys &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - phys;

	map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
	if (unlikely(!map))
		return -ENOMEM;

	map->addr = phys;
	map->size = size;
	map->name = name;

	page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
				       get_order(map->size));
	if (unlikely(page < 0)) {
		ret = -ENOSPC;
		goto out;
	}

	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);

	ret = __sq_remap(map, flags);
	if (unlikely(ret != 0)) {
		/* Give back the bitmap region claimed above */
		bitmap_release_region(sq_bitmap, page, get_order(map->size));
		goto out;
	}

	psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	pr_info("sqremap: %15s  [%4d page%s]  va 0x%08lx   pa 0x%08lx\n",
		likely(map->name) ? map->name : "???",
		psz, psz == 1 ? " " : "s",
		map->sq_addr, map->addr);

	sq_mapping_list_add(map);

	return map->sq_addr;

out:
	kmem_cache_free(sq_cache, map);
	return ret;
}
EXPORT_SYMBOL(sq_remap);
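
/*
 * Illustrative lifecycle (not from the original source; the physical
 * address, size and name below are made up):
 *
 *	unsigned long sq;
 *
 *	sq = sq_remap(0x10000000, 0x00100000, "Example FB",
 *		      pgprot_val(PAGE_SHARED));
 *	if (IS_ERR_VALUE(sq))
 *		return sq;
 *
 *	... 32-byte writes to sq, followed by sq_flush_range() ...
 *
 *	sq_unmap(sq);
 */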

/**
 * sq_unmap - Unmap a Store Queue allocation
 * @vaddr: Pre-allocated Store Queue mapping.
 *
 * Unmaps the store queue allocation @vaddr that was previously created by
 * sq_remap(). Also frees up the pte that was previously inserted into
 * the kernel page table and discards the UTLB translation.
 */
void sq_unmap(unsigned long vaddr)
{
	struct sq_mapping **p, *map;
	int page;

	for (p = &sq_mapping_list; (map = *p); p = &map->next)
		if (map->sq_addr == vaddr)
			break;

	if (unlikely(!map)) {
		printk(KERN_ERR "%s: bad store queue address 0x%08lx\n",
		       __func__, vaddr);
		return;
	}

	page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
	bitmap_release_region(sq_bitmap, page, get_order(map->size));

#ifdef CONFIG_MMU
	{
		/*
		 * Tear down the vm area that __sq_remap() set up for this
		 * mapping.
		 */
		struct vm_struct *vma;

		vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
		if (!vma) {
			printk(KERN_ERR "%s: bad address 0x%08lx\n",
			       __func__, map->sq_addr);
			return;
		}
	}
#endif

	sq_mapping_list_del(map);

	kmem_cache_free(sq_cache, map);
}
EXPORT_SYMBOL(sq_unmap);

/*
 * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
 * there is any other easy way to add things on a per-cpu basis without
 * putting the directory entries somewhere stupid and having to create
 * links in sysfs by hand back in to the per-cpu directories.
 *
 * Some day we may want to have an additional abstraction per store
 * queue, but considering the kobject hell we already have to deal with,
 * it's simply not worth the trouble.
 */
static struct kobject *sq_kobject[NR_CPUS];

struct sq_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(char *buf);
	ssize_t (*store)(const char *buf, size_t count);
};

#define to_sq_sysfs_attr(attr)	container_of(attr, struct sq_sysfs_attr, attr)

static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->show))
		return sattr->show(buf);

	return -EIO;
}

static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->store))
		return sattr->store(buf, count);

	return -EIO;
}

static ssize_t mapping_show(char *buf)
{
	struct sq_mapping **list, *entry;
	char *p = buf;

	for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
		p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
			     entry->sq_addr, entry->sq_addr + entry->size,
			     entry->addr, entry->name);

	return p - buf;
}

static ssize_t mapping_store(const char *buf, size_t count)
{
	unsigned long base = 0, len = 0;

	sscanf(buf, "%lx %lx", &base, &len);
	if (!base)
		return -EIO;

	if (likely(len)) {
		int ret = sq_remap(base, len, "Userspace",
				   pgprot_val(PAGE_SHARED));
		if (ret < 0)
			return ret;
	} else
		sq_unmap(base);

	return count;
}
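
/*
 * The mapping attribute takes "<phys> <len>" in hex to create a mapping,
 * while writing a store queue address with a zero (or omitted) length
 * tears one down. Illustrative shell use, assuming the per-cpu "sq"
 * directory registered below lands at /sys/devices/system/cpu/cpu0/sq:
 *
 *	# create a 1MB mapping at physical 0x10000000
 *	echo "0x10000000 0x100000" > /sys/devices/system/cpu/cpu0/sq/mapping
 *
 *	# list active mappings
 *	cat /sys/devices/system/cpu/cpu0/sq/mapping
 */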

static struct sq_sysfs_attr mapping_attr =
	__ATTR(mapping, 0644, mapping_show, mapping_store);

static struct attribute *sq_sysfs_attrs[] = {
	&mapping_attr.attr,
	NULL,
};

static struct sysfs_ops sq_sysfs_ops = {
	.show	= sq_sysfs_show,
	.store	= sq_sysfs_store,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sq_sysfs_ops,
	.default_attrs	= sq_sysfs_attrs,
};

static int __devinit sq_sysdev_add(struct sys_device *sysdev)
{
	unsigned int cpu = sysdev->id;
	struct kobject *kobj;

	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(!sq_kobject[cpu]))
		return -ENOMEM;

	kobj = sq_kobject[cpu];
	kobj->parent = &sysdev->kobj;
	kobject_set_name(kobj, "%s", "sq");
	kobj->ktype = &ktype_percpu_entry;

	return kobject_register(kobj);
}

static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
{
	unsigned int cpu = sysdev->id;
	struct kobject *kobj = sq_kobject[cpu];

	kobject_unregister(kobj);
	return 0;
}

static struct sysdev_driver sq_sysdev_driver = {
	.add		= sq_sysdev_add,
	.remove		= __devexit_p(sq_sysdev_remove),
};

static int __init sq_api_init(void)
{
	unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
	unsigned int size = BITS_TO_LONGS(nr_pages) * sizeof(unsigned long);
	int ret = -ENOMEM;

	printk(KERN_NOTICE "sq: Registering store queue API.\n");

	sq_cache = kmem_cache_create("store_queue_cache",
				     sizeof(struct sq_mapping), 0, 0,
				     NULL, NULL);
	if (unlikely(!sq_cache))
		return ret;

	/* One bit per page of the 64MB store queue address space */
	sq_bitmap = kzalloc(size, GFP_KERNEL);
	if (unlikely(!sq_bitmap))
		goto out;

	ret = sysdev_driver_register(&cpu_sysdev_class, &sq_sysdev_driver);
	if (unlikely(ret != 0))
		goto out;

	return 0;

out:
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);

	return ret;
}

static void __exit sq_api_exit(void)
{
	sysdev_driver_unregister(&cpu_sysdev_class, &sq_sysdev_driver);
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);
}

module_init(sq_api_init);
module_exit(sq_api_exit);

MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");