/* drivers/block/aoe/aoedev.c */
/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */
/*
 * aoedev.c
 * AoE device utility functions; maintains device list.
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "aoe.h"

static void dummy_timer(ulong);
static void aoedev_freedev(struct aoedev *);
static void freetgt(struct aoedev *d, struct aoetgt *t);
static void skbpoolfree(struct aoedev *d);

static struct aoedev *devlist;
static DEFINE_SPINLOCK(devlist_lock);

/*
 * Users who grab a pointer to the device with aoedev_by_aoeaddr or
 * aoedev_by_sysminor_m automatically take a reference and are
 * responsible for a matching aoedev_put.  With the addition of async
 * kthread processing I'm no longer confident that we can guarantee
 * consistency in the face of device flushes.
 *
 * For the time being, we only bother to add extra references for
 * frames sitting on the iocq.  When the kthreads finish processing
 * these frames, they will aoedev_put the device.
 */
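/*
 * Typical caller pairing (illustrative sketch, not copied from any
 * particular caller):
 *
 *	d = aoedev_by_aoeaddr(maj, min);
 *	if (d == NULL)
 *		return;
 *	... use d ...
 *	aoedev_put(d);
 */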
struct aoedev *
aoedev_by_aoeaddr(int maj, int min)
{
	struct aoedev *d;
	ulong flags;

	spin_lock_irqsave(&devlist_lock, flags);

	for (d=devlist; d; d=d->next)
		if (d->aoemajor == maj && d->aoeminor == min) {
			d->ref++;
			break;
		}

	spin_unlock_irqrestore(&devlist_lock, flags);
	return d;
}

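/* Drop a reference taken by aoedev_by_aoeaddr() or aoedev_by_sysminor_m(). */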
void
aoedev_put(struct aoedev *d)
{
	ulong flags;

	spin_lock_irqsave(&devlist_lock, flags);
	d->ref--;
	spin_unlock_irqrestore(&devlist_lock, flags);
}

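/*
 * Per-device timer: rearm once per second until the device is being
 * torn down (DEVFL_TKILL).
 */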
static void
dummy_timer(ulong vp)
{
	struct aoedev *d;

	d = (struct aoedev *)vp;
	if (d->flags & DEVFL_TKILL)
		return;
	d->timer.expires = jiffies + HZ;
	add_timer(&d->timer);
}

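/*
 * Fail the in-process buffer and walk the remaining bios of the
 * in-process request, marking each not-uptodate and dropping the
 * outstanding-bio count kept in rq->special; end the request once
 * that count reaches zero.
 */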
static void
aoe_failip(struct aoedev *d)
{
	struct request *rq;
	struct bio *bio;
	unsigned long n;

	aoe_failbuf(d, d->ip.buf);

	rq = d->ip.rq;
	if (rq == NULL)
		return;
	while ((bio = d->ip.nxbio)) {
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
		d->ip.nxbio = bio->bi_next;
		n = (unsigned long) rq->special;
		rq->special = (void *) --n;
	}
	if ((unsigned long) rq->special == 0)
		aoe_end_request(d, rq, 0);
}

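/*
 * Take the device down: fail all active frames and the in-process
 * request, reset each target's congestion window, fast-fail anything
 * still queued on the block layer, and zero the disk's capacity.
 */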
void
aoedev_downdev(struct aoedev *d)
{
	struct aoetgt *t, **tt, **te;
	struct frame *f;
	struct list_head *head, *pos, *nx;
	struct request *rq;
	int i;

	d->flags &= ~DEVFL_UP;

	/* clean out active buffers */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head) {
			f = list_entry(pos, struct frame, head);
			list_del(pos);
			if (f->buf) {
				f->buf->nframesout--;
				aoe_failbuf(d, f->buf);
			}
			aoe_freetframe(f);
		}
	}
	/* reset window dressings */
	tt = d->targets;
	te = tt + NTARGETS;
	for (; tt < te && (t = *tt); tt++) {
		t->maxout = t->nframes;
		t->nout = 0;
	}

	/* clean out the in-process request (if any) */
	aoe_failip(d);
	d->htgt = NULL;

	/* fast fail all pending I/O */
	if (d->blkq) {
		while ((rq = blk_peek_request(d->blkq))) {
			blk_start_request(rq);
			aoe_end_request(d, rq, 1);
		}
	}

	if (d->gd)
		set_capacity(d->gd, 0);
}

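/*
 * Final teardown of a device that has already been unlinked from
 * devlist: flush deferred work, release the gendisk and request
 * queue, free all targets and the skb pool, and free the aoedev.
 * May sleep.
 */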
static void
aoedev_freedev(struct aoedev *d)
{
	struct aoetgt **t, **e;

	cancel_work_sync(&d->work);
	if (d->gd) {
		aoedisk_rm_sysfs(d);
		del_gendisk(d->gd);
		put_disk(d->gd);
		blk_cleanup_queue(d->blkq);
	}
	t = d->targets;
	e = t + NTARGETS;
	for (; t < e && *t; t++)
		freetgt(d, *t);
	if (d->bufpool)
		mempool_destroy(d->bufpool);
	skbpoolfree(d);
	kfree(d);
}

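/*
 * Remove and free idle devices.  By default only devices that are
 * down are candidates; writing "all" also allows devices that are
 * still up.  Devices being allocated or resized, or that are open or
 * referenced, are always skipped.  Invoked via the aoe character
 * device's flush interface.
 */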
int
aoedev_flush(const char __user *str, size_t cnt)
{
	ulong flags;
	struct aoedev *d, **dd;
	struct aoedev *rmd = NULL;
	char buf[16];
	int all = 0;

	if (cnt >= 3) {
		if (cnt > sizeof buf)
			cnt = sizeof buf;
		if (copy_from_user(buf, str, cnt))
			return -EFAULT;
		all = !strncmp(buf, "all", 3);
	}

	spin_lock_irqsave(&devlist_lock, flags);
	dd = &devlist;
	while ((d = *dd)) {
		spin_lock(&d->lock);
		if ((!all && (d->flags & DEVFL_UP))
		|| (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		|| d->nopen
		|| d->ref) {
			spin_unlock(&d->lock);
			dd = &d->next;
			continue;
		}
		*dd = d->next;
		aoedev_downdev(d);
		d->flags |= DEVFL_TKILL;
		spin_unlock(&d->lock);
		d->next = rmd;
		rmd = d;
	}
	spin_unlock_irqrestore(&devlist_lock, flags);
	while ((d = rmd)) {
		rmd = d->next;
		del_timer_sync(&d->timer);
		aoedev_freedev(d);	/* must be able to sleep */
	}
	return 0;
}

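/*
 * Wait (up to Tms) for the network layer to release its reference on
 * skb, then reset the skb and free it.  If the reference is never
 * released, report the leak and give up rather than risk freeing an
 * skb still in use.
 */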
/* The timeout case below has been confirmed to occur once, with
 * Tms=3*1000, when the driver changed link and stopped processing its
 * transmit ring.  Solving that properly (by returning an error) is
 * hard enough that I'm still punting on "solving" this.
 */
static void
skbfree(struct sk_buff *skb)
{
	enum { Sms = 250, Tms = 30 * 1000};
	int i = Tms / Sms;

	if (skb == NULL)
		return;
	while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0)
		msleep(Sms);
	if (i < 0) {
		printk(KERN_ERR
			"aoe: %s holds ref: %s\n",
			skb->dev ? skb->dev->name : "netif",
			"cannot free skb -- memory leaked.");
		return;
	}
	skb->truesize -= skb->data_len;
	skb_shinfo(skb)->nr_frags = skb->data_len = 0;
	skb_trim(skb, 0);
	dev_kfree_skb(skb);
}

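/* Free every skb in the device's skb pool and reinitialize the queue head. */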
static void
skbpoolfree(struct aoedev *d)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&d->skbpool, skb, tmp)
		skbfree(skb);

	__skb_queue_head_init(&d->skbpool);
}

/* find it or malloc it */
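/*
 * Look up the aoedev for sysminor and take a reference on it; if none
 * exists, allocate and initialize one (GFP_ATOMIC, under devlist_lock)
 * and add it to devlist.
 */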
struct aoedev *
aoedev_by_sysminor_m(ulong sysminor)
{
	struct aoedev *d;
	int i;
	ulong flags;

	spin_lock_irqsave(&devlist_lock, flags);

	for (d=devlist; d; d=d->next)
		if (d->sysminor == sysminor) {
			d->ref++;
			break;
		}
	if (d)
		goto out;
	d = kcalloc(1, sizeof *d, GFP_ATOMIC);
	if (!d)
		goto out;
	INIT_WORK(&d->work, aoecmd_sleepwork);
	spin_lock_init(&d->lock);
	skb_queue_head_init(&d->skbpool);
	init_timer(&d->timer);
	d->timer.data = (ulong) d;
	d->timer.function = dummy_timer;
	d->timer.expires = jiffies + HZ;
	add_timer(&d->timer);
	d->bufpool = NULL;	/* defer to aoeblk_gdalloc */
	d->tgt = d->targets;
	d->ref = 1;
	for (i = 0; i < NFACTIVE; i++)
		INIT_LIST_HEAD(&d->factive[i]);
	d->sysminor = sysminor;
	d->aoemajor = AOEMAJOR(sysminor);
	d->aoeminor = AOEMINOR(sysminor);
	d->mintimer = MINTIMER;
	d->next = devlist;
	devlist = d;
 out:
	spin_unlock_irqrestore(&devlist_lock, flags);
	return d;
}

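/* Free a target's free-frame list (each frame and its skb) and the target itself. */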
static void
freetgt(struct aoedev *d, struct aoetgt *t)
{
	struct frame *f;
	struct list_head *pos, *nx, *head;

	head = &t->ffree;
	list_for_each_safe(pos, nx, head) {
		list_del(pos);
		f = list_entry(pos, struct frame, head);
		skbfree(f->skb);
		kfree(f);
	}
	kfree(t);
}

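/*
 * Module exit: flush the I/O completion queue, then take down and
 * free every device remaining on devlist.
 */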
void
aoedev_exit(void)
{
	struct aoedev *d;
	ulong flags;

	aoe_flush_iocq();
	while ((d = devlist)) {
		devlist = d->next;

		spin_lock_irqsave(&d->lock, flags);
		aoedev_downdev(d);
		d->flags |= DEVFL_TKILL;
		spin_unlock_irqrestore(&d->lock, flags);

		del_timer_sync(&d->timer);
		aoedev_freedev(d);
	}
}

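/* Nothing to set up here; devices are created on demand by aoedev_by_sysminor_m. */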
int __init
aoedev_init(void)
{
	return 0;
}