// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * ep0.c - Endpoint 0 handling
 *
 * Copyright 2017 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"
35 int ast_vhub_reply(struct ast_vhub_ep
*ep
, char *ptr
, int len
)
37 struct usb_request
*req
= &ep
->ep0
.req
.req
;
40 if (WARN_ON(ep
->d_idx
!= 0))
42 if (WARN_ON(!ep
->ep0
.dir_in
))
44 if (WARN_ON(len
> AST_VHUB_EP0_MAX_PACKET
))
46 if (WARN_ON(req
->status
== -EINPROGRESS
))
55 * Call internal queue directly after dropping the lock. This is
56 * safe to do as the reply is always the last thing done when
57 * processing a SETUP packet, usually as a tail call
59 spin_unlock(&ep
->vhub
->lock
);
60 if (ep
->ep
.ops
->queue(&ep
->ep
, req
, GFP_ATOMIC
))
64 spin_lock(&ep
->vhub
->lock
);
68 int __ast_vhub_simple_reply(struct ast_vhub_ep
*ep
, int len
, ...)
76 /* Copy data directly into EP buffer */
77 for (i
= 0; i
< len
; i
++)
78 buffer
[i
] = va_arg(args
, int);
81 /* req->buf NULL means data is already there */
82 return ast_vhub_reply(ep
, NULL
, len
);
85 void ast_vhub_ep0_handle_setup(struct ast_vhub_ep
*ep
)
87 struct usb_ctrlrequest crq
;
88 enum std_req_rc std_req_rc
;
91 if (WARN_ON(ep
->d_idx
!= 0))
95 * Grab the setup packet from the chip and byteswap
98 memcpy_fromio(&crq
, ep
->ep0
.setup
, sizeof(crq
));
100 EPDBG(ep
, "SETUP packet %02x/%02x/%04x/%04x/%04x [%s] st=%d\n",
101 crq
.bRequestType
, crq
.bRequest
,
102 le16_to_cpu(crq
.wValue
),
103 le16_to_cpu(crq
.wIndex
),
104 le16_to_cpu(crq
.wLength
),
105 (crq
.bRequestType
& USB_DIR_IN
) ? "in" : "out",
108 /* Check our state, cancel pending requests if needed */
109 if (ep
->ep0
.state
!= ep0_state_token
) {
110 EPDBG(ep
, "wrong state\n");
111 ast_vhub_nuke(ep
, -EIO
);
114 * Accept the packet regardless, this seems to happen
115 * when stalling a SETUP packet that has an OUT data
118 ast_vhub_nuke(ep
, 0);
122 /* Calculate next state for EP0 */
123 ep
->ep0
.state
= ep0_state_data
;
124 ep
->ep0
.dir_in
= !!(crq
.bRequestType
& USB_DIR_IN
);
126 /* If this is the vHub, we handle requests differently */
127 std_req_rc
= std_req_driver
;
128 if (ep
->dev
== NULL
) {
129 if ((crq
.bRequestType
& USB_TYPE_MASK
) == USB_TYPE_STANDARD
)
130 std_req_rc
= ast_vhub_std_hub_request(ep
, &crq
);
131 else if ((crq
.bRequestType
& USB_TYPE_MASK
) == USB_TYPE_CLASS
)
132 std_req_rc
= ast_vhub_class_hub_request(ep
, &crq
);
134 std_req_rc
= std_req_stall
;
135 } else if ((crq
.bRequestType
& USB_TYPE_MASK
) == USB_TYPE_STANDARD
)
136 std_req_rc
= ast_vhub_std_dev_request(ep
, &crq
);
138 /* Act upon result */
140 case std_req_complete
:
150 /* Pass request up to the gadget driver */
151 if (WARN_ON(!ep
->dev
))
153 if (ep
->dev
->driver
) {
154 EPDBG(ep
, "forwarding to gadget...\n");
155 spin_unlock(&ep
->vhub
->lock
);
156 rc
= ep
->dev
->driver
->setup(&ep
->dev
->gadget
, &crq
);
157 spin_lock(&ep
->vhub
->lock
);
158 EPDBG(ep
, "driver returned %d\n", rc
);
160 EPDBG(ep
, "no gadget for request !\n");
166 EPDBG(ep
, "stalling\n");
167 writel(VHUB_EP0_CTRL_STALL
, ep
->ep0
.ctlstat
);
168 ep
->ep0
.state
= ep0_state_status
;
169 ep
->ep0
.dir_in
= false;
173 EPVDBG(ep
, "sending [in] status with no data\n");
174 writel(VHUB_EP0_TX_BUFF_RDY
, ep
->ep0
.ctlstat
);
175 ep
->ep0
.state
= ep0_state_status
;
176 ep
->ep0
.dir_in
= false;
180 static void ast_vhub_ep0_do_send(struct ast_vhub_ep
*ep
,
181 struct ast_vhub_req
*req
)
186 /* If this is a 0-length request, it's the gadget trying to
187 * send a status on our behalf. We take it from here.
189 if (req
->req
.length
== 0)
192 /* Are we done ? Complete request, otherwise wait for next interrupt */
193 if (req
->last_desc
>= 0) {
194 EPVDBG(ep
, "complete send %d/%d\n",
195 req
->req
.actual
, req
->req
.length
);
196 ep
->ep0
.state
= ep0_state_status
;
197 writel(VHUB_EP0_RX_BUFF_RDY
, ep
->ep0
.ctlstat
);
198 ast_vhub_done(ep
, req
, 0);
203 * Next chunk cropped to max packet size. Also check if this
206 chunk
= req
->req
.length
- req
->req
.actual
;
207 if (chunk
> ep
->ep
.maxpacket
)
208 chunk
= ep
->ep
.maxpacket
;
209 else if ((chunk
< ep
->ep
.maxpacket
) || !req
->req
.zero
)
212 EPVDBG(ep
, "send chunk=%d last=%d, req->act=%d mp=%d\n",
213 chunk
, req
->last_desc
, req
->req
.actual
, ep
->ep
.maxpacket
);
216 * Copy data if any (internal requests already have data
219 if (chunk
&& req
->req
.buf
)
220 memcpy(ep
->buf
, req
->req
.buf
+ req
->req
.actual
, chunk
);
222 /* Remember chunk size and trigger send */
223 reg
= VHUB_EP0_SET_TX_LEN(chunk
);
224 writel(reg
, ep
->ep0
.ctlstat
);
225 writel(reg
| VHUB_EP0_TX_BUFF_RDY
, ep
->ep0
.ctlstat
);
226 req
->req
.actual
+= chunk
;
229 static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep
*ep
)
231 EPVDBG(ep
, "rx prime\n");
233 /* Prime endpoint for receiving data */
234 writel(VHUB_EP0_RX_BUFF_RDY
, ep
->ep0
.ctlstat
);
237 static void ast_vhub_ep0_do_receive(struct ast_vhub_ep
*ep
, struct ast_vhub_req
*req
,
243 /* We are receiving... grab request */
244 remain
= req
->req
.length
- req
->req
.actual
;
246 EPVDBG(ep
, "receive got=%d remain=%d\n", len
, remain
);
248 /* Are we getting more than asked ? */
250 EPDBG(ep
, "receiving too much (ovf: %d) !\n",
255 if (len
&& req
->req
.buf
)
256 memcpy(req
->req
.buf
+ req
->req
.actual
, ep
->buf
, len
);
257 req
->req
.actual
+= len
;
260 if (len
< ep
->ep
.maxpacket
|| len
== remain
) {
261 ep
->ep0
.state
= ep0_state_status
;
262 writel(VHUB_EP0_TX_BUFF_RDY
, ep
->ep0
.ctlstat
);
263 ast_vhub_done(ep
, req
, rc
);
265 ast_vhub_ep0_rx_prime(ep
);
268 void ast_vhub_ep0_handle_ack(struct ast_vhub_ep
*ep
, bool in_ack
)
270 struct ast_vhub_req
*req
;
271 struct ast_vhub
*vhub
= ep
->vhub
;
272 struct device
*dev
= &vhub
->pdev
->dev
;
276 /* Read EP0 status */
277 stat
= readl(ep
->ep0
.ctlstat
);
279 /* Grab current request if any */
280 req
= list_first_entry_or_null(&ep
->queue
, struct ast_vhub_req
, queue
);
282 EPVDBG(ep
, "ACK status=%08x,state=%d is_in=%d in_ack=%d req=%p\n",
283 stat
, ep
->ep0
.state
, ep
->ep0
.dir_in
, in_ack
, req
);
285 switch(ep
->ep0
.state
) {
286 case ep0_state_token
:
287 /* There should be no request queued in that state... */
289 dev_warn(dev
, "request present while in TOKEN state\n");
290 ast_vhub_nuke(ep
, -EINVAL
);
292 dev_warn(dev
, "ack while in TOKEN state\n");
296 /* Check the state bits corresponding to our direction */
297 if ((ep
->ep0
.dir_in
&& (stat
& VHUB_EP0_TX_BUFF_RDY
)) ||
298 (!ep
->ep0
.dir_in
&& (stat
& VHUB_EP0_RX_BUFF_RDY
)) ||
299 (ep
->ep0
.dir_in
!= in_ack
)) {
300 dev_warn(dev
, "irq state mismatch");
305 * We are in data phase and there's no request, something is
309 dev_warn(dev
, "data phase, no request\n");
314 /* We have a request, handle data transfers */
316 ast_vhub_ep0_do_send(ep
, req
);
318 ast_vhub_ep0_do_receive(ep
, req
, VHUB_EP0_RX_LEN(stat
));
320 case ep0_state_status
:
321 /* Nuke stale requests */
323 dev_warn(dev
, "request present while in STATUS state\n");
324 ast_vhub_nuke(ep
, -EINVAL
);
328 * If the status phase completes with the wrong ack, stall
329 * the endpoint just in case, to abort whatever the host
332 if (ep
->ep0
.dir_in
== in_ack
) {
333 dev_warn(dev
, "status direction mismatch\n");
338 /* Reset to token state */
339 ep
->ep0
.state
= ep0_state_token
;
341 writel(VHUB_EP0_CTRL_STALL
, ep
->ep0
.ctlstat
);
344 static int ast_vhub_ep0_queue(struct usb_ep
* u_ep
, struct usb_request
*u_req
,
347 struct ast_vhub_req
*req
= to_ast_req(u_req
);
348 struct ast_vhub_ep
*ep
= to_ast_ep(u_ep
);
349 struct ast_vhub
*vhub
= ep
->vhub
;
350 struct device
*dev
= &vhub
->pdev
->dev
;
354 if (!u_req
|| (!u_req
->complete
&& !req
->internal
)) {
355 dev_warn(dev
, "Bogus EP0 request ! u_req=%p\n", u_req
);
357 dev_warn(dev
, "complete=%p internal=%d\n",
358 u_req
->complete
, req
->internal
);
363 /* Not endpoint 0 ? */
364 if (WARN_ON(ep
->d_idx
!= 0))
367 /* Disabled device */
368 if (ep
->dev
&& (!ep
->dev
->enabled
|| ep
->dev
->suspended
))
371 /* Data, no buffer and not internal ? */
372 if (u_req
->length
&& !u_req
->buf
&& !req
->internal
) {
373 dev_warn(dev
, "Request with no buffer !\n");
377 EPVDBG(ep
, "enqueue req @%p\n", req
);
378 EPVDBG(ep
, " l=%d zero=%d noshort=%d is_in=%d\n",
379 u_req
->length
, u_req
->zero
,
380 u_req
->short_not_ok
, ep
->ep0
.dir_in
);
382 /* Initialize request progress fields */
383 u_req
->status
= -EINPROGRESS
;
388 spin_lock_irqsave(&vhub
->lock
, flags
);
390 /* EP0 can only support a single request at a time */
391 if (!list_empty(&ep
->queue
) || ep
->ep0
.state
== ep0_state_token
) {
392 dev_warn(dev
, "EP0: Request in wrong state\n");
393 spin_unlock_irqrestore(&vhub
->lock
, flags
);
397 /* Add request to list and kick processing if empty */
398 list_add_tail(&req
->queue
, &ep
->queue
);
400 if (ep
->ep0
.dir_in
) {
401 /* IN request, send data */
402 ast_vhub_ep0_do_send(ep
, req
);
403 } else if (u_req
->length
== 0) {
404 /* 0-len request, send completion as rx */
405 EPVDBG(ep
, "0-length rx completion\n");
406 ep
->ep0
.state
= ep0_state_status
;
407 writel(VHUB_EP0_TX_BUFF_RDY
, ep
->ep0
.ctlstat
);
408 ast_vhub_done(ep
, req
, 0);
410 /* OUT request, start receiver */
411 ast_vhub_ep0_rx_prime(ep
);
414 spin_unlock_irqrestore(&vhub
->lock
, flags
);
419 static int ast_vhub_ep0_dequeue(struct usb_ep
* u_ep
, struct usb_request
*u_req
)
421 struct ast_vhub_ep
*ep
= to_ast_ep(u_ep
);
422 struct ast_vhub
*vhub
= ep
->vhub
;
423 struct ast_vhub_req
*req
;
427 spin_lock_irqsave(&vhub
->lock
, flags
);
429 /* Only one request can be in the queue */
430 req
= list_first_entry_or_null(&ep
->queue
, struct ast_vhub_req
, queue
);
433 if (req
&& u_req
== &req
->req
) {
434 EPVDBG(ep
, "dequeue req @%p\n", req
);
437 * We don't have to deal with "active" as all
438 * DMAs go to the EP buffers, not the request.
440 ast_vhub_done(ep
, req
, -ECONNRESET
);
442 /* We do stall the EP to clean things up in HW */
443 writel(VHUB_EP0_CTRL_STALL
, ep
->ep0
.ctlstat
);
444 ep
->ep0
.state
= ep0_state_status
;
445 ep
->ep0
.dir_in
= false;
448 spin_unlock_irqrestore(&vhub
->lock
, flags
);
453 static const struct usb_ep_ops ast_vhub_ep0_ops
= {
454 .queue
= ast_vhub_ep0_queue
,
455 .dequeue
= ast_vhub_ep0_dequeue
,
456 .alloc_request
= ast_vhub_alloc_request
,
457 .free_request
= ast_vhub_free_request
,
460 void ast_vhub_init_ep0(struct ast_vhub
*vhub
, struct ast_vhub_ep
*ep
,
461 struct ast_vhub_dev
*dev
)
463 memset(ep
, 0, sizeof(*ep
));
465 INIT_LIST_HEAD(&ep
->ep
.ep_list
);
466 INIT_LIST_HEAD(&ep
->queue
);
467 ep
->ep
.ops
= &ast_vhub_ep0_ops
;
469 ep
->ep
.caps
.type_control
= true;
470 usb_ep_set_maxpacket_limit(&ep
->ep
, AST_VHUB_EP0_MAX_PACKET
);
474 ep
->ep0
.state
= ep0_state_token
;
475 INIT_LIST_HEAD(&ep
->ep0
.req
.queue
);
476 ep
->ep0
.req
.internal
= true;
478 /* Small difference between vHub and devices */
480 ep
->ep0
.ctlstat
= dev
->regs
+ AST_VHUB_DEV_EP0_CTRL
;
481 ep
->ep0
.setup
= vhub
->regs
+
482 AST_VHUB_SETUP0
+ 8 * (dev
->index
+ 1);
483 ep
->buf
= vhub
->ep0_bufs
+
484 AST_VHUB_EP0_MAX_PACKET
* (dev
->index
+ 1);
485 ep
->buf_dma
= vhub
->ep0_bufs_dma
+
486 AST_VHUB_EP0_MAX_PACKET
* (dev
->index
+ 1);
488 ep
->ep0
.ctlstat
= vhub
->regs
+ AST_VHUB_EP0_CTRL
;
489 ep
->ep0
.setup
= vhub
->regs
+ AST_VHUB_SETUP0
;
490 ep
->buf
= vhub
->ep0_bufs
;
491 ep
->buf_dma
= vhub
->ep0_bufs_dma
;