]>
Commit | Line | Data |
---|---|---|
63b94509 TL |
1 | /* |
2 | * AMD Cryptographic Coprocessor (CCP) driver | |
3 | * | |
4 | * Copyright (C) 2013 Advanced Micro Devices, Inc. | |
5 | * | |
6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | */ | |
12 | ||
13 | #include <linux/module.h> | |
14 | #include <linux/kernel.h> | |
15 | #include <linux/pci.h> | |
16 | #include <linux/pci_ids.h> | |
17 | #include <linux/kthread.h> | |
18 | #include <linux/sched.h> | |
19 | #include <linux/interrupt.h> | |
20 | #include <linux/spinlock.h> | |
21 | #include <linux/delay.h> | |
22 | #include <linux/ccp.h> | |
23 | ||
24 | #include "ccp-dev.h" | |
25 | ||
/* PCI BAR index where the CCP MMIO registers are normally mapped */
#define IO_BAR 2
/* Maximum number of MSI-X vectors this driver will request */
#define MSIX_VECTORS 2

/* Book-keeping for one requested MSI-X interrupt */
struct ccp_msix {
	u32 vector;	/* vector number returned by pci_enable_msix() */
	char name[16];	/* irq name handed to request_irq() */
};

/* PCI-specific state, hung off ccp_device->dev_specific */
struct ccp_pci {
	int msix_count;	/* vectors in use; 0 means MSI (not MSI-X) is active */
	struct ccp_msix msix[MSIX_VECTORS];
};
38 | ||
39 | static int ccp_get_msix_irqs(struct ccp_device *ccp) | |
40 | { | |
41 | struct ccp_pci *ccp_pci = ccp->dev_specific; | |
42 | struct device *dev = ccp->dev; | |
43 | struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); | |
44 | struct msix_entry msix_entry[MSIX_VECTORS]; | |
45 | unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1; | |
46 | int v, ret; | |
47 | ||
48 | for (v = 0; v < ARRAY_SIZE(msix_entry); v++) | |
49 | msix_entry[v].entry = v; | |
50 | ||
51 | while ((ret = pci_enable_msix(pdev, msix_entry, v)) > 0) | |
52 | v = ret; | |
53 | if (ret) | |
54 | return ret; | |
55 | ||
56 | ccp_pci->msix_count = v; | |
57 | for (v = 0; v < ccp_pci->msix_count; v++) { | |
58 | /* Set the interrupt names and request the irqs */ | |
59 | snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v); | |
60 | ccp_pci->msix[v].vector = msix_entry[v].vector; | |
61 | ret = request_irq(ccp_pci->msix[v].vector, ccp_irq_handler, | |
62 | 0, ccp_pci->msix[v].name, dev); | |
63 | if (ret) { | |
64 | dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n", | |
65 | ret); | |
66 | goto e_irq; | |
67 | } | |
68 | } | |
69 | ||
70 | return 0; | |
71 | ||
72 | e_irq: | |
73 | while (v--) | |
74 | free_irq(ccp_pci->msix[v].vector, dev); | |
75 | ||
76 | pci_disable_msix(pdev); | |
77 | ||
78 | ccp_pci->msix_count = 0; | |
79 | ||
80 | return ret; | |
81 | } | |
82 | ||
83 | static int ccp_get_msi_irq(struct ccp_device *ccp) | |
84 | { | |
85 | struct device *dev = ccp->dev; | |
86 | struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); | |
87 | int ret; | |
88 | ||
89 | ret = pci_enable_msi(pdev); | |
90 | if (ret) | |
91 | return ret; | |
92 | ||
93 | ret = request_irq(pdev->irq, ccp_irq_handler, 0, "ccp", dev); | |
94 | if (ret) { | |
95 | dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret); | |
96 | goto e_msi; | |
97 | } | |
98 | ||
99 | return 0; | |
100 | ||
101 | e_msi: | |
102 | pci_disable_msi(pdev); | |
103 | ||
104 | return ret; | |
105 | } | |
106 | ||
107 | static int ccp_get_irqs(struct ccp_device *ccp) | |
108 | { | |
109 | struct device *dev = ccp->dev; | |
110 | int ret; | |
111 | ||
112 | ret = ccp_get_msix_irqs(ccp); | |
113 | if (!ret) | |
114 | return 0; | |
115 | ||
116 | /* Couldn't get MSI-X vectors, try MSI */ | |
117 | dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret); | |
118 | ret = ccp_get_msi_irq(ccp); | |
119 | if (!ret) | |
120 | return 0; | |
121 | ||
122 | /* Couldn't get MSI interrupt */ | |
123 | dev_notice(dev, "could not enable MSI (%d)\n", ret); | |
124 | ||
125 | return ret; | |
126 | } | |
127 | ||
128 | static void ccp_free_irqs(struct ccp_device *ccp) | |
129 | { | |
130 | struct ccp_pci *ccp_pci = ccp->dev_specific; | |
131 | struct device *dev = ccp->dev; | |
132 | struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); | |
133 | ||
134 | if (ccp_pci->msix_count) { | |
135 | while (ccp_pci->msix_count--) | |
136 | free_irq(ccp_pci->msix[ccp_pci->msix_count].vector, | |
137 | dev); | |
138 | pci_disable_msix(pdev); | |
139 | } else { | |
140 | free_irq(pdev->irq, dev); | |
141 | pci_disable_msi(pdev); | |
142 | } | |
143 | } | |
144 | ||
145 | static int ccp_find_mmio_area(struct ccp_device *ccp) | |
146 | { | |
147 | struct device *dev = ccp->dev; | |
148 | struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); | |
149 | resource_size_t io_len; | |
150 | unsigned long io_flags; | |
151 | int bar; | |
152 | ||
153 | io_flags = pci_resource_flags(pdev, IO_BAR); | |
154 | io_len = pci_resource_len(pdev, IO_BAR); | |
155 | if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800))) | |
156 | return IO_BAR; | |
157 | ||
158 | for (bar = 0; bar < PCI_STD_RESOURCE_END; bar++) { | |
159 | io_flags = pci_resource_flags(pdev, bar); | |
160 | io_len = pci_resource_len(pdev, bar); | |
161 | if ((io_flags & IORESOURCE_MEM) && | |
162 | (io_len >= (IO_OFFSET + 0x800))) | |
163 | return bar; | |
164 | } | |
165 | ||
166 | return -EIO; | |
167 | } | |
168 | ||
/*
 * ccp_pci_probe() - bring up one CCP PCI function.
 *
 * Allocates the device structure, claims PCI resources, maps the
 * register BAR, configures DMA masks and hands off to ccp_init().
 * Returns 0 on success or a negative errno; all resources acquired
 * before a failure are unwound via the goto labels below (each label
 * releases exactly what was acquired after the previous one).
 */
static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ccp_device *ccp;
	struct ccp_pci *ccp_pci;
	struct device *dev = &pdev->dev;
	unsigned int bar;
	int ret;

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(dev);
	if (!ccp)
		goto e_err;

	ccp_pci = kzalloc(sizeof(*ccp_pci), GFP_KERNEL);
	if (!ccp_pci) {
		ret = -ENOMEM;
		goto e_free1;
	}
	/* Hook the PCI-specific state and irq callbacks into the core */
	ccp->dev_specific = ccp_pci;
	ccp->get_irq = ccp_get_irqs;
	ccp->free_irq = ccp_free_irqs;

	/* NOTE(review): regions are requested before pci_enable_device();
	 * most drivers enable the device first — confirm this ordering is
	 * intentional.
	 */
	ret = pci_request_regions(pdev, "ccp");
	if (ret) {
		dev_err(dev, "pci_request_regions failed (%d)\n", ret);
		goto e_free2;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pci_enable_device failed (%d)\n", ret);
		goto e_regions;
	}

	pci_set_master(pdev);

	ret = ccp_find_mmio_area(ccp);
	if (ret < 0)
		goto e_device;
	bar = ret;

	ret = -EIO;
	ccp->io_map = pci_iomap(pdev, bar, 0);
	if (ccp->io_map == NULL) {
		dev_err(dev, "pci_iomap failed\n");
		goto e_device;
	}
	/* Registers start IO_OFFSET bytes into the mapped BAR */
	ccp->io_regs = ccp->io_map + IO_OFFSET;

	/* Prefer a 48-bit DMA mask, fall back to 32-bit */
	ret = dma_set_mask(dev, DMA_BIT_MASK(48));
	if (ret == 0) {
		ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(48));
		if (ret) {
			/* NOTE(review): message names the old
			 * pci_set_consistent_dma_mask() API, but
			 * dma_set_coherent_mask() is what is called —
			 * stale log text, left as-is here.
			 */
			dev_err(dev,
				"pci_set_consistent_dma_mask failed (%d)\n",
				ret);
			goto e_bar0;
		}
	} else {
		ret = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev, "pci_set_dma_mask failed (%d)\n", ret);
			goto e_bar0;
		}
	}

	dev_set_drvdata(dev, ccp);

	ret = ccp_init(ccp);
	if (ret)
		goto e_bar0;

	dev_notice(dev, "enabled\n");

	return 0;

e_bar0:
	pci_iounmap(pdev, ccp->io_map);

e_device:
	pci_disable_device(pdev);

e_regions:
	pci_release_regions(pdev);

e_free2:
	kfree(ccp_pci);

e_free1:
	kfree(ccp);

e_err:
	dev_notice(dev, "initialization failed\n");
	return ret;
}
264 | ||
/*
 * ccp_pci_remove() - tear down a CCP PCI function.
 *
 * Releases everything ccp_pci_probe() acquired, in reverse order of
 * acquisition.  ccp_destroy() is called first so the core stops using
 * the registers before the BAR mapping goes away.
 */
static void ccp_pci_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);

	/* Nothing to do if probe never attached drvdata */
	if (!ccp)
		return;

	ccp_destroy(ccp);

	pci_iounmap(pdev, ccp->io_map);

	pci_disable_device(pdev);

	pci_release_regions(pdev);

	kfree(ccp);

	dev_notice(dev, "disabled\n");
}
285 | ||
286 | #ifdef CONFIG_PM | |
/*
 * ccp_pci_suspend() - legacy PM suspend hook.
 *
 * Sets ccp->suspending under cmd_lock so every queue kthread observes
 * it, wakes the kthreads so they notice, then blocks until all queues
 * report themselves suspended.  Always returns 0.
 */
static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

	/* Wake all the queue kthreads to prepare for suspend */
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* Wait for all queue kthreads to say they're done; the outer loop
	 * re-checks because wait_event_interruptible() can return early
	 * when a signal is pending.
	 */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));

	return 0;
}
311 | ||
312 | static int ccp_pci_resume(struct pci_dev *pdev) | |
313 | { | |
314 | struct device *dev = &pdev->dev; | |
315 | struct ccp_device *ccp = dev_get_drvdata(dev); | |
316 | unsigned long flags; | |
317 | unsigned int i; | |
318 | ||
319 | spin_lock_irqsave(&ccp->cmd_lock, flags); | |
320 | ||
321 | ccp->suspending = 0; | |
322 | ||
323 | /* Wake up all the kthreads */ | |
324 | for (i = 0; i < ccp->cmd_q_count; i++) { | |
325 | ccp->cmd_q[i].suspended = 0; | |
326 | wake_up_process(ccp->cmd_q[i].kthread); | |
327 | } | |
328 | ||
329 | spin_unlock_irqrestore(&ccp->cmd_lock, flags); | |
330 | ||
331 | return 0; | |
332 | } | |
333 | #endif | |
334 | ||
335 | static DEFINE_PCI_DEVICE_TABLE(ccp_pci_table) = { | |
336 | { PCI_VDEVICE(AMD, 0x1537), }, | |
337 | /* Last entry must be zero */ | |
338 | { 0, } | |
339 | }; | |
340 | MODULE_DEVICE_TABLE(pci, ccp_pci_table); | |
341 | ||
/* PCI driver registration: probe/remove plus legacy PM callbacks */
static struct pci_driver ccp_pci_driver = {
	.name = "AMD Cryptographic Coprocessor",
	.id_table = ccp_pci_table,
	.probe = ccp_pci_probe,
	.remove = ccp_pci_remove,
#ifdef CONFIG_PM
	.suspend = ccp_pci_suspend,
	.resume = ccp_pci_resume,
#endif
};
352 | ||
/* Register the CCP PCI driver; called from the ccp module init path.
 * Returns 0 or the negative errno from pci_register_driver().
 */
int ccp_pci_init(void)
{
	return pci_register_driver(&ccp_pci_driver);
}
357 | ||
/* Unregister the CCP PCI driver; called from the ccp module exit path */
void ccp_pci_exit(void)
{
	pci_unregister_driver(&ccp_pci_driver);
}