]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - drivers/usb/gadget/udc/aspeed-vhub/ep0.c
usb: gadget: aspeed-vhub: Fix SETUP packets with OUT data phase
[mirror_ubuntu-jammy-kernel.git] / drivers / usb / gadget / udc / aspeed-vhub / ep0.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
4 *
5 * ep0.c - Endpoint 0 handling
6 *
7 * Copyright 2017 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 */
14
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/platform_device.h>
18 #include <linux/delay.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/errno.h>
22 #include <linux/list.h>
23 #include <linux/interrupt.h>
24 #include <linux/proc_fs.h>
25 #include <linux/prefetch.h>
26 #include <linux/clk.h>
27 #include <linux/usb/gadget.h>
28 #include <linux/of.h>
29 #include <linux/of_gpio.h>
30 #include <linux/regmap.h>
31 #include <linux/dma-mapping.h>
32
33 #include "vhub.h"
34
35 int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len)
36 {
37 struct usb_request *req = &ep->ep0.req.req;
38 int rc;
39
40 if (WARN_ON(ep->d_idx != 0))
41 return std_req_stall;
42 if (WARN_ON(!ep->ep0.dir_in))
43 return std_req_stall;
44 if (WARN_ON(len > AST_VHUB_EP0_MAX_PACKET))
45 return std_req_stall;
46 if (WARN_ON(req->status == -EINPROGRESS))
47 return std_req_stall;
48
49 req->buf = ptr;
50 req->length = len;
51 req->complete = NULL;
52 req->zero = true;
53
54 /*
55 * Call internal queue directly after dropping the lock. This is
56 * safe to do as the reply is always the last thing done when
57 * processing a SETUP packet, usually as a tail call
58 */
59 spin_unlock(&ep->vhub->lock);
60 if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
61 rc = std_req_stall;
62 else
63 rc = std_req_data;
64 spin_lock(&ep->vhub->lock);
65 return rc;
66 }
67
68 int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...)
69 {
70 u8 *buffer = ep->buf;
71 unsigned int i;
72 va_list args;
73
74 va_start(args, len);
75
76 /* Copy data directly into EP buffer */
77 for (i = 0; i < len; i++)
78 buffer[i] = va_arg(args, int);
79 va_end(args);
80
81 /* req->buf NULL means data is already there */
82 return ast_vhub_reply(ep, NULL, len);
83 }
84
85 void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
86 {
87 struct usb_ctrlrequest crq;
88 enum std_req_rc std_req_rc;
89 int rc = -ENODEV;
90
91 if (WARN_ON(ep->d_idx != 0))
92 return;
93
94 /*
95 * Grab the setup packet from the chip and byteswap
96 * interesting fields
97 */
98 memcpy_fromio(&crq, ep->ep0.setup, sizeof(crq));
99
100 EPDBG(ep, "SETUP packet %02x/%02x/%04x/%04x/%04x [%s] st=%d\n",
101 crq.bRequestType, crq.bRequest,
102 le16_to_cpu(crq.wValue),
103 le16_to_cpu(crq.wIndex),
104 le16_to_cpu(crq.wLength),
105 (crq.bRequestType & USB_DIR_IN) ? "in" : "out",
106 ep->ep0.state);
107
108 /* Check our state, cancel pending requests if needed */
109 if (ep->ep0.state != ep0_state_token) {
110 EPDBG(ep, "wrong state\n");
111 ast_vhub_nuke(ep, -EIO);
112
113 /*
114 * Accept the packet regardless, this seems to happen
115 * when stalling a SETUP packet that has an OUT data
116 * phase.
117 */
118 ast_vhub_nuke(ep, 0);
119 goto stall;
120 }
121
122 /* Calculate next state for EP0 */
123 ep->ep0.state = ep0_state_data;
124 ep->ep0.dir_in = !!(crq.bRequestType & USB_DIR_IN);
125
126 /* If this is the vHub, we handle requests differently */
127 std_req_rc = std_req_driver;
128 if (ep->dev == NULL) {
129 if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
130 std_req_rc = ast_vhub_std_hub_request(ep, &crq);
131 else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
132 std_req_rc = ast_vhub_class_hub_request(ep, &crq);
133 else
134 std_req_rc = std_req_stall;
135 } else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
136 std_req_rc = ast_vhub_std_dev_request(ep, &crq);
137
138 /* Act upon result */
139 switch(std_req_rc) {
140 case std_req_complete:
141 goto complete;
142 case std_req_stall:
143 goto stall;
144 case std_req_driver:
145 break;
146 case std_req_data:
147 return;
148 }
149
150 /* Pass request up to the gadget driver */
151 if (WARN_ON(!ep->dev))
152 goto stall;
153 if (ep->dev->driver) {
154 EPDBG(ep, "forwarding to gadget...\n");
155 spin_unlock(&ep->vhub->lock);
156 rc = ep->dev->driver->setup(&ep->dev->gadget, &crq);
157 spin_lock(&ep->vhub->lock);
158 EPDBG(ep, "driver returned %d\n", rc);
159 } else {
160 EPDBG(ep, "no gadget for request !\n");
161 }
162 if (rc >= 0)
163 return;
164
165 stall:
166 EPDBG(ep, "stalling\n");
167 writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
168 ep->ep0.state = ep0_state_status;
169 ep->ep0.dir_in = false;
170 return;
171
172 complete:
173 EPVDBG(ep, "sending [in] status with no data\n");
174 writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
175 ep->ep0.state = ep0_state_status;
176 ep->ep0.dir_in = false;
177 }
178
179
/*
 * Push the next chunk of an IN (device-to-host) EP0 data phase.
 *
 * Called when an IN request is first queued and again from the ack
 * interrupt after each packet has been transmitted. When the last
 * packet has gone out, completes the request and arms the receiver
 * for the host's 0-length OUT status packet.
 */
static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
				 struct ast_vhub_req *req)
{
	unsigned int chunk;
	u32 reg;

	/* If this is a 0-length request, it's the gadget trying to
	 * send a status on our behalf. We take it from here.
	 */
	if (req->req.length == 0)
		req->last_desc = 1;

	/* Are we done ? Complete request, otherwise wait for next interrupt */
	if (req->last_desc >= 0) {
		EPVDBG(ep, "complete send %d/%d\n",
		       req->req.actual, req->req.length);
		/* Data phase over: enter status and arm RX for the
		 * host's 0-length OUT status packet */
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
		return;
	}

	/*
	 * Next chunk cropped to max packet size. Also check if this
	 * is the last packet
	 */
	chunk = req->req.length - req->req.actual;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		/* Short final packet, or full-size with no ZLP wanted */
		req->last_desc = 1;

	EPVDBG(ep, "send chunk=%d last=%d, req->act=%d mp=%d\n",
	       chunk, req->last_desc, req->req.actual, ep->ep.maxpacket);

	/*
	 * Copy data if any (internal requests already have data
	 * in the EP buffer)
	 */
	if (chunk && req->req.buf)
		memcpy(ep->buf, req->req.buf + req->req.actual, chunk);

	/* Remember chunk size and trigger send: the TX length is
	 * written first, then re-written together with the buffer
	 * ready bit in a second MMIO write */
	reg = VHUB_EP0_SET_TX_LEN(chunk);
	writel(reg, ep->ep0.ctlstat);
	writel(reg | VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	req->req.actual += chunk;
}
228
/* Arm the EP0 receiver so the next OUT packet from the host lands in
 * the EP buffer. */
static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
{
	EPVDBG(ep, "rx prime\n");

	/* Prime endpoint for receiving data */
	writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
}
236
/*
 * Consume a packet received in the EP0 buffer during an OUT
 * (host-to-device) data phase.
 *
 * @len is the byte count the controller reported for this packet.
 * On a short packet or once the expected length has been received,
 * completes the request and arms the IN status phase; otherwise
 * re-primes the receiver for the next packet. A packet larger than
 * the space left is truncated and reported as -EOVERFLOW.
 */
static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
				    unsigned int len)
{
	unsigned int remain;
	int rc = 0;

	/* We are receiving... grab request */
	remain = req->req.length - req->req.actual;

	EPVDBG(ep, "receive got=%d remain=%d\n", len, remain);

	/* Are we getting more than asked ? */
	if (len > remain) {
		EPDBG(ep, "receiving too much (ovf: %d) !\n",
		      len - remain);
		/* Truncate to what fits and flag the overflow */
		len = remain;
		rc = -EOVERFLOW;
	}
	if (len && req->req.buf)
		memcpy(req->req.buf + req->req.actual, ep->buf, len);
	req->req.actual += len;

	/* Done ? Short packet or full length received */
	if (len < ep->ep.maxpacket || len == remain) {
		/* Enter status phase: send the 0-length IN status packet */
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, rc);
	} else
		ast_vhub_ep0_rx_prime(ep);
}
267
/*
 * Handle an EP0 "ack" interrupt and advance the EP0 state machine.
 *
 * @in_ack is true when the interrupt signals completion of an IN
 * transmission, false for an OUT reception. Any disagreement between
 * the software state, the hardware status bits and the interrupt
 * direction stalls the endpoint and resets the state machine to
 * token (waiting for the next SETUP).
 */
void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
{
	struct ast_vhub_req *req;
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	bool stall = false;
	u32 stat;

	/* Read EP0 status */
	stat = readl(ep->ep0.ctlstat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x,state=%d is_in=%d in_ack=%d req=%p\n",
	       stat, ep->ep0.state, ep->ep0.dir_in, in_ack, req);

	switch(ep->ep0.state) {
	case ep0_state_token:
		/* There should be no request queued in that state... */
		if (req) {
			dev_warn(dev, "request present while in TOKEN state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}
		dev_warn(dev, "ack while in TOKEN state\n");
		stall = true;
		break;
	case ep0_state_data:
		/* Check the state bits corresponding to our direction:
		 * a "buffer ready" bit still set in our direction, or an
		 * interrupt for the wrong direction, means HW and SW
		 * disagree */
		if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
		    (!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
		    (ep->ep0.dir_in != in_ack)) {
			dev_warn(dev, "irq state mismatch");
			stall = true;
			break;
		}
		/*
		 * We are in data phase and there's no request, something is
		 * wrong, stall
		 */
		if (!req) {
			dev_warn(dev, "data phase, no request\n");
			stall = true;
			break;
		}

		/* We have a request, handle data transfers */
		if (ep->ep0.dir_in)
			ast_vhub_ep0_do_send(ep, req);
		else
			ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat));
		return;
	case ep0_state_status:
		/* Nuke stale requests */
		if (req) {
			dev_warn(dev, "request present while in STATUS state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}

		/*
		 * If the status phase completes with the wrong ack, stall
		 * the endpoint just in case, to abort whatever the host
		 * was doing. (The status phase runs opposite to dir_in in
		 * every path that enters it, hence the equality test.)
		 */
		if (ep->ep0.dir_in == in_ack) {
			dev_warn(dev, "status direction mismatch\n");
			stall = true;
		}
	}

	/* Reset to token state */
	ep->ep0.state = ep0_state_token;
	if (stall)
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
}
343
/*
 * usb_ep_ops->queue for EP0: start the data or status phase for a
 * request submitted by the gadget driver (or internally via
 * ast_vhub_reply()).
 *
 * EP0 supports a single request at a time. Returns 0 on success,
 * -EINVAL on malformed requests, -ESHUTDOWN when the device is
 * disabled or suspended, -EBUSY when a request is already pending or
 * the state machine is still waiting for a SETUP token.
 */
static int ast_vhub_ep0_queue(struct usb_ep* u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	unsigned long flags;

	/* Paranoid checks */
	if (!u_req || (!u_req->complete && !req->internal)) {
		dev_warn(dev, "Bogus EP0 request ! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}

	/* Not endpoint 0 ? */
	if (WARN_ON(ep->d_idx != 0))
		return -EINVAL;

	/* Disabled device */
	if (ep->dev && (!ep->dev->enabled || ep->dev->suspended))
		return -ESHUTDOWN;

	/* Data, no buffer and not internal ? */
	if (u_req->length && !u_req->buf && !req->internal) {
		dev_warn(dev, "Request with no buffer !\n");
		return -EINVAL;
	}

	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, " l=%d zero=%d noshort=%d is_in=%d\n",
	       u_req->length, u_req->zero,
	       u_req->short_not_ok, ep->ep0.dir_in);

	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->last_desc = -1;
	req->active = false;

	spin_lock_irqsave(&vhub->lock, flags);

	/* EP0 can only support a single request at a time */
	if (!list_empty(&ep->queue) || ep->ep0.state == ep0_state_token) {
		dev_warn(dev, "EP0: Request in wrong state\n");
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EBUSY;
	}

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);

	if (ep->ep0.dir_in) {
		/* IN request, send data */
		ast_vhub_ep0_do_send(ep, req);
	} else if (u_req->length == 0) {
		/* 0-len request, send completion as rx */
		EPVDBG(ep, "0-length rx completion\n");
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
	} else {
		/* OUT request, start receiver */
		ast_vhub_ep0_rx_prime(ep);
	}

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}
418
419 static int ast_vhub_ep0_dequeue(struct usb_ep* u_ep, struct usb_request *u_req)
420 {
421 struct ast_vhub_ep *ep = to_ast_ep(u_ep);
422 struct ast_vhub *vhub = ep->vhub;
423 struct ast_vhub_req *req;
424 unsigned long flags;
425 int rc = -EINVAL;
426
427 spin_lock_irqsave(&vhub->lock, flags);
428
429 /* Only one request can be in the queue */
430 req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
431
432 /* Is it ours ? */
433 if (req && u_req == &req->req) {
434 EPVDBG(ep, "dequeue req @%p\n", req);
435
436 /*
437 * We don't have to deal with "active" as all
438 * DMAs go to the EP buffers, not the request.
439 */
440 ast_vhub_done(ep, req, -ECONNRESET);
441
442 /* We do stall the EP to clean things up in HW */
443 writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
444 ep->ep0.state = ep0_state_status;
445 ep->ep0.dir_in = false;
446 rc = 0;
447 }
448 spin_unlock_irqrestore(&vhub->lock, flags);
449 return rc;
450 }
451
452
/* EP0 endpoint operations: only queue/dequeue and request alloc/free
 * are provided here; the remaining usb_ep_ops callbacks are left
 * unset for this control endpoint. */
static const struct usb_ep_ops ast_vhub_ep0_ops = {
	.queue = ast_vhub_ep0_queue,
	.dequeue = ast_vhub_ep0_dequeue,
	.alloc_request = ast_vhub_alloc_request,
	.free_request = ast_vhub_free_request,
};
459
460 void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
461 struct ast_vhub_dev *dev)
462 {
463 memset(ep, 0, sizeof(*ep));
464
465 INIT_LIST_HEAD(&ep->ep.ep_list);
466 INIT_LIST_HEAD(&ep->queue);
467 ep->ep.ops = &ast_vhub_ep0_ops;
468 ep->ep.name = "ep0";
469 ep->ep.caps.type_control = true;
470 usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EP0_MAX_PACKET);
471 ep->d_idx = 0;
472 ep->dev = dev;
473 ep->vhub = vhub;
474 ep->ep0.state = ep0_state_token;
475 INIT_LIST_HEAD(&ep->ep0.req.queue);
476 ep->ep0.req.internal = true;
477
478 /* Small difference between vHub and devices */
479 if (dev) {
480 ep->ep0.ctlstat = dev->regs + AST_VHUB_DEV_EP0_CTRL;
481 ep->ep0.setup = vhub->regs +
482 AST_VHUB_SETUP0 + 8 * (dev->index + 1);
483 ep->buf = vhub->ep0_bufs +
484 AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
485 ep->buf_dma = vhub->ep0_bufs_dma +
486 AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
487 } else {
488 ep->ep0.ctlstat = vhub->regs + AST_VHUB_EP0_CTRL;
489 ep->ep0.setup = vhub->regs + AST_VHUB_SETUP0;
490 ep->buf = vhub->ep0_bufs;
491 ep->buf_dma = vhub->ep0_bufs_dma;
492 }
493 }