// SPDX-License-Identifier: GPL-2.0-only
/*
 * ISHTP DMA I/F functions
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ishtp-dev.h"
#include "client.h"

/**
 * ishtp_cl_alloc_dma_buf() - Allocate DMA RX and TX buffers
 * @dev: ishtp device
 *
 * Allocate the RX and TX DMA buffers once during bus setup.
 * It allocates a 1 MB buffer each for RX and TX, which are
 * divided into slots.
 */
void ishtp_cl_alloc_dma_buf(struct ishtp_device *dev)
{
	dma_addr_t h;

	if (dev->ishtp_host_dma_tx_buf)
		return;

	dev->ishtp_host_dma_tx_buf_size = 1024*1024;
	dev->ishtp_host_dma_rx_buf_size = 1024*1024;

	/* Allocate Tx buffer and init usage bitmap */
	dev->ishtp_host_dma_tx_buf = dma_alloc_coherent(dev->devc,
					dev->ishtp_host_dma_tx_buf_size,
					&h, GFP_KERNEL);
	if (dev->ishtp_host_dma_tx_buf)
		dev->ishtp_host_dma_tx_buf_phys = h;

	dev->ishtp_dma_num_slots = dev->ishtp_host_dma_tx_buf_size /
						DMA_SLOT_SIZE;

	dev->ishtp_dma_tx_map = kcalloc(dev->ishtp_dma_num_slots,
					sizeof(uint8_t),
					GFP_KERNEL);
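	/* Serializes allocation and release of Tx DMA slots */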
	spin_lock_init(&dev->ishtp_dma_tx_lock);

	/* Allocate Rx buffer */
	dev->ishtp_host_dma_rx_buf = dma_alloc_coherent(dev->devc,
					dev->ishtp_host_dma_rx_buf_size,
					&h, GFP_KERNEL);

	if (dev->ishtp_host_dma_rx_buf)
		dev->ishtp_host_dma_rx_buf_phys = h;
}

/**
 * ishtp_cl_free_dma_buf() - Free DMA RX and TX buffers
 * @dev: ishtp device
 *
 * Free the DMA buffers when all clients are released. This only
 * happens during the error path in the ISH built-in driver model.
 */
void ishtp_cl_free_dma_buf(struct ishtp_device *dev)
{
	dma_addr_t h;

	if (dev->ishtp_host_dma_tx_buf) {
		h = dev->ishtp_host_dma_tx_buf_phys;
		dma_free_coherent(dev->devc, dev->ishtp_host_dma_tx_buf_size,
				  dev->ishtp_host_dma_tx_buf, h);
	}

	if (dev->ishtp_host_dma_rx_buf) {
		h = dev->ishtp_host_dma_rx_buf_phys;
		dma_free_coherent(dev->devc, dev->ishtp_host_dma_rx_buf_size,
				  dev->ishtp_host_dma_rx_buf, h);
	}

	kfree(dev->ishtp_dma_tx_map);
	dev->ishtp_host_dma_tx_buf = NULL;
	dev->ishtp_host_dma_rx_buf = NULL;
	dev->ishtp_dma_tx_map = NULL;
}

/**
 * ishtp_cl_get_dma_send_buf() - Get a DMA memory slot
 * @dev: ishtp device
 * @size: Size of memory to get
 *
 * Find and return a free address of "size" bytes in the DMA Tx buffer.
 * The function marks this address range as in use.
 *
 * Return: NULL when no free buffer is available, else an address to copy to
 */
void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
				uint32_t size)
{
	unsigned long flags;
	int i, j, free;
	/* an additional slot is needed if size is not a multiple of the slot size */
	int required_slots = (size / DMA_SLOT_SIZE)
		+ 1 * (size % DMA_SLOT_SIZE != 0);

	spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
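	/* First-fit search for required_slots consecutive free slots */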
	for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
		free = 1;
		for (j = 0; j < required_slots; j++)
			if (dev->ishtp_dma_tx_map[i+j]) {
				free = 0;
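				/* resume the outer scan just past this occupied slot */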
				i += j;
				break;
			}
		if (free) {
			/* mark the slots as in use */
			for (j = 0; j < required_slots; j++)
				dev->ishtp_dma_tx_map[i+j] = 1;
			spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
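			/* Translate the slot index into an address within the Tx buffer */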
			return (i * DMA_SLOT_SIZE) +
				(unsigned char *)dev->ishtp_host_dma_tx_buf;
		}
	}
	spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
	dev_err(dev->devc, "No free DMA buffer to send msg\n");
	return NULL;
}

/**
 * ishtp_cl_release_dma_acked_mem() - Release DMA memory slots
 * @dev: ishtp device
 * @msg_addr: message address of slot
 * @size: Size of memory to release
 *
 * Return the acked memory (size bytes starting at msg_addr) to the
 * free slot list.
 */
void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
				    void *msg_addr,
				    uint8_t size)
{
	unsigned long flags;
	int acked_slots = (size / DMA_SLOT_SIZE)
		+ 1 * (size % DMA_SLOT_SIZE != 0);
	int i, j;

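	/* The acked address must be slot-aligned within the Tx DMA buffer */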
	if ((msg_addr - dev->ishtp_host_dma_tx_buf) % DMA_SLOT_SIZE) {
		dev_err(dev->devc, "Bad DMA Tx ack address\n");
		return;
	}

	i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
	spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
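	/* Clear the in-use mark for every slot covered by the acked message */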
	for (j = 0; j < acked_slots; j++) {
		if ((i + j) >= dev->ishtp_dma_num_slots ||
			!dev->ishtp_dma_tx_map[i+j]) {
			/* no such slot, or memory is already free */
			spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
			dev_err(dev->devc, "Bad DMA Tx ack address\n");
			return;
		}
		dev->ishtp_dma_tx_map[i+j] = 0;
	}
	spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
}