]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - drivers/staging/sep/sep_driver.c
Fix common misspellings
[mirror_ubuntu-eoan-kernel.git] / drivers / staging / sep / sep_driver.c
CommitLineData
4856ab33
MA
1/*
2 *
3 * sep_driver.c - Security Processor Driver main group of functions
4 *
5 * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009,2010 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 * CONTACTS:
22 *
23 * Mark Allyn mark.a.allyn@intel.com
24 * Jayant Mangalampalli jayant.mangalampalli@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
30 *
31 */
4856ab33
MA
32#include <linux/init.h>
33#include <linux/module.h>
34#include <linux/miscdevice.h>
35#include <linux/fs.h>
36#include <linux/cdev.h>
37#include <linux/kdev_t.h>
38#include <linux/mutex.h>
39#include <linux/sched.h>
40#include <linux/mm.h>
41#include <linux/poll.h>
42#include <linux/wait.h>
43#include <linux/pci.h>
44#include <linux/firmware.h>
45#include <linux/slab.h>
46#include <linux/ioctl.h>
47#include <asm/current.h>
48#include <linux/ioport.h>
49#include <linux/io.h>
50#include <linux/interrupt.h>
51#include <linux/pagemap.h>
52#include <asm/cacheflush.h>
53#include <linux/sched.h>
54#include <linux/delay.h>
be38efe1 55#include <linux/jiffies.h>
62a8c3a3 56#include <linux/rar_register.h>
4856ab33 57
266aa856
AC
58#include "../memrar/memrar.h"
59
4856ab33
MA
60#include "sep_driver_hw_defs.h"
61#include "sep_driver_config.h"
62#include "sep_driver_api.h"
63#include "sep_dev.h"
64
65/*----------------------------------------
66 DEFINES
67-----------------------------------------*/
68
69#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
70
71/*--------------------------------------------
72 GLOBAL variables
73--------------------------------------------*/
74
75/* Keep this a single static object for now to keep the conversion easy */
76
77static struct sep_device *sep_dev;
78
4856ab33
MA
79/**
80 * sep_dump_message - dump the message that is pending
6eb44c53 81 * @sep: SEP device
4856ab33 82 */
4856ab33
MA
83static void sep_dump_message(struct sep_device *sep)
84{
85 int count;
86 u32 *p = sep->shared_addr;
87 for (count = 0; count < 12 * 4; count += 4)
da14e551
AC
88 dev_dbg(&sep->pdev->dev, "Word %d of the message is %x\n",
89 count, *p++);
4856ab33
MA
90}
91
92/**
93 * sep_map_and_alloc_shared_area - allocate shared block
94 * @sep: security processor
95 * @size: size of shared area
96 */
4856ab33
MA
97static int sep_map_and_alloc_shared_area(struct sep_device *sep)
98{
99 sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
100 sep->shared_size,
101 &sep->shared_bus, GFP_KERNEL);
102
103 if (!sep->shared_addr) {
104 dev_warn(&sep->pdev->dev,
105 "shared memory dma_alloc_coherent failed\n");
106 return -ENOMEM;
107 }
108 dev_dbg(&sep->pdev->dev,
d1f521c1 109 "shared_addr %zx bytes @%p (bus %llx)\n",
da14e551
AC
110 sep->shared_size, sep->shared_addr,
111 (unsigned long long)sep->shared_bus);
4856ab33
MA
112 return 0;
113}
114
115/**
116 * sep_unmap_and_free_shared_area - free shared block
117 * @sep: security processor
118 */
119static void sep_unmap_and_free_shared_area(struct sep_device *sep)
120{
4856ab33
MA
121 dma_free_coherent(&sep->pdev->dev, sep->shared_size,
122 sep->shared_addr, sep->shared_bus);
123}
124
125/**
126 * sep_shared_bus_to_virt - convert bus/virt addresses
4856ab33
MA
127 * @sep: pointer to struct sep_device
128 * @bus_address: address to convert
d1bb8321
AC
129 *
130 * Returns virtual address inside the shared area according
131 * to the bus address.
4856ab33 132 */
4856ab33
MA
133static void *sep_shared_bus_to_virt(struct sep_device *sep,
134 dma_addr_t bus_address)
135{
136 return sep->shared_addr + (bus_address - sep->shared_bus);
137}
138
139/**
140 * open function for the singleton driver
141 * @inode_ptr struct inode *
142 * @file_ptr struct file *
d1bb8321
AC
143 *
144 * Called when the user opens the singleton device interface
4856ab33 145 */
4856ab33
MA
146static int sep_singleton_open(struct inode *inode_ptr, struct file *file_ptr)
147{
4856ab33
MA
148 struct sep_device *sep;
149
d1bb8321 150 /*
6eb44c53 151 * Get the SEP device structure and use it for the
4856ab33
MA
152 * private_data field in filp for other methods
153 */
154 sep = sep_dev;
155
156 file_ptr->private_data = sep;
157
836aded1
AC
158 if (test_and_set_bit(0, &sep->singleton_access_flag))
159 return -EBUSY;
160 return 0;
4856ab33
MA
161}
162
163/**
164 * sep_open - device open method
6eb44c53
AC
165 * @inode: inode of SEP device
166 * @filp: file handle to SEP device
4856ab33
MA
167 *
168 * Open method for the SEP device. Called when userspace opens
e957b063 169 * the SEP device node.
4856ab33
MA
170 *
171 * Returns zero on success otherwise an error code.
172 */
4856ab33
MA
173static int sep_open(struct inode *inode, struct file *filp)
174{
175 struct sep_device *sep;
176
d1bb8321 177 /*
6eb44c53 178 * Get the SEP device structure and use it for the
4856ab33
MA
179 * private_data field in filp for other methods
180 */
181 sep = sep_dev;
182 filp->private_data = sep;
183
d1bb8321 184 /* Anyone can open; locking takes place at transaction level */
4856ab33
MA
185 return 0;
186}
187
188/**
189 * sep_singleton_release - close a SEP singleton device
190 * @inode: inode of SEP device
191 * @filp: file handle being closed
192 *
193 * Called on the final close of a SEP device. As the open protects against
194 * multiple simultaenous opens that means this method is called when the
195 * final reference to the open handle is dropped.
196 */
4856ab33
MA
197static int sep_singleton_release(struct inode *inode, struct file *filp)
198{
199 struct sep_device *sep = filp->private_data;
200
4856ab33 201 clear_bit(0, &sep->singleton_access_flag);
4856ab33
MA
202 return 0;
203}
204
205/**
206 * sep_request_daemonopen - request daemon open method
6eb44c53
AC
207 * @inode: inode of SEP device
208 * @filp: file handle to SEP device
4856ab33
MA
209 *
210 * Open method for the SEP request daemon. Called when
d1bb8321 211 * request daemon in userspace opens the SEP device node.
4856ab33
MA
212 *
213 * Returns zero on success otherwise an error code.
214 */
4856ab33
MA
215static int sep_request_daemon_open(struct inode *inode, struct file *filp)
216{
da14e551 217 struct sep_device *sep = sep_dev;
4856ab33
MA
218 int error = 0;
219
4856ab33
MA
220 filp->private_data = sep;
221
4856ab33 222 /* There is supposed to be only one request daemon */
da14e551 223 if (test_and_set_bit(0, &sep->request_daemon_open))
4856ab33 224 error = -EBUSY;
4856ab33
MA
225 return error;
226}
227
228/**
229 * sep_request_daemon_release - close a SEP daemon
230 * @inode: inode of SEP device
231 * @filp: file handle being closed
232 *
233 * Called on the final close of a SEP daemon.
234 */
4856ab33
MA
235static int sep_request_daemon_release(struct inode *inode, struct file *filp)
236{
237 struct sep_device *sep = filp->private_data;
238
dfcfc166 239 dev_dbg(&sep->pdev->dev, "Request daemon release for pid %d\n",
4856ab33
MA
240 current->pid);
241
6eb44c53 242 /* Clear the request_daemon_open flag */
4856ab33 243 clear_bit(0, &sep->request_daemon_open);
4856ab33
MA
244 return 0;
245}
246
247/**
d1bb8321
AC
248 * sep_req_daemon_send_reply_command_handler - poke the SEP
249 * @sep: struct sep_device *
250 *
4856ab33
MA
251 * This function raises interrupt to SEPm that signals that is has a
252 * new command from HOST
4856ab33
MA
253 */
254static int sep_req_daemon_send_reply_command_handler(struct sep_device *sep)
255{
4856ab33
MA
256 unsigned long lck_flags;
257
4856ab33
MA
258 sep_dump_message(sep);
259
6eb44c53 260 /* Counters are lockable region */
4856ab33
MA
261 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
262 sep->send_ct++;
263 sep->reply_ct++;
264
6eb44c53 265 /* Send the interrupt to SEP */
da3f825b 266 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
4856ab33
MA
267 sep->send_ct++;
268
269 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
270
271 dev_dbg(&sep->pdev->dev,
272 "sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n",
273 sep->send_ct, sep->reply_ct);
274
da14e551 275 return 0;
4856ab33
MA
276}
277
278
279/**
d1bb8321 280 * sep_free_dma_table_data_handler - free DMA table
4856ab33 281 * @sep: pointere to struct sep_device
d1bb8321 282 *
6eb44c53 283 * Handles the request to free DMA table for synchronic actions
4856ab33 284 */
4856ab33
MA
285static int sep_free_dma_table_data_handler(struct sep_device *sep)
286{
da14e551
AC
287 int count;
288 int dcb_counter;
6eb44c53 289 /* Pointer to the current dma_resource struct */
4856ab33
MA
290 struct sep_dma_resource *dma;
291
da14e551 292 for (dcb_counter = 0; dcb_counter < sep->nr_dcb_creat; dcb_counter++) {
4856ab33
MA
293 dma = &sep->dma_res_arr[dcb_counter];
294
6eb44c53 295 /* Unmap and free input map array */
4856ab33 296 if (dma->in_map_array) {
da14e551 297 for (count = 0; count < dma->in_num_pages; count++) {
4856ab33
MA
298 dma_unmap_page(&sep->pdev->dev,
299 dma->in_map_array[count].dma_addr,
300 dma->in_map_array[count].size,
301 DMA_TO_DEVICE);
302 }
4856ab33
MA
303 kfree(dma->in_map_array);
304 }
305
6eb44c53 306 /* Unmap output map array, DON'T free it yet */
4856ab33 307 if (dma->out_map_array) {
da14e551 308 for (count = 0; count < dma->out_num_pages; count++) {
4856ab33
MA
309 dma_unmap_page(&sep->pdev->dev,
310 dma->out_map_array[count].dma_addr,
311 dma->out_map_array[count].size,
312 DMA_FROM_DEVICE);
313 }
4856ab33
MA
314 kfree(dma->out_map_array);
315 }
316
6eb44c53 317 /* Free page cache for output */
4856ab33 318 if (dma->in_page_array) {
da14e551 319 for (count = 0; count < dma->in_num_pages; count++) {
4856ab33
MA
320 flush_dcache_page(dma->in_page_array[count]);
321 page_cache_release(dma->in_page_array[count]);
322 }
4856ab33 323 kfree(dma->in_page_array);
4856ab33
MA
324 }
325
326 if (dma->out_page_array) {
da14e551 327 for (count = 0; count < dma->out_num_pages; count++) {
4856ab33
MA
328 if (!PageReserved(dma->out_page_array[count]))
329 SetPageDirty(dma->out_page_array[count]);
4856ab33
MA
330 flush_dcache_page(dma->out_page_array[count]);
331 page_cache_release(dma->out_page_array[count]);
332 }
4856ab33
MA
333 kfree(dma->out_page_array);
334 }
335
6eb44c53 336 /* Reset all the values */
dda16b23
PH
337 dma->in_page_array = NULL;
338 dma->out_page_array = NULL;
4856ab33
MA
339 dma->in_num_pages = 0;
340 dma->out_num_pages = 0;
dda16b23
PH
341 dma->in_map_array = NULL;
342 dma->out_map_array = NULL;
4856ab33
MA
343 dma->in_map_num_entries = 0;
344 dma->out_map_num_entries = 0;
4856ab33
MA
345 }
346
347 sep->nr_dcb_creat = 0;
348 sep->num_lli_tables_created = 0;
349
4856ab33
MA
350 return 0;
351}
352
4856ab33 353/**
d1bb8321 354 * sep_request_daemon_mmap - maps the shared area to user space
4856ab33
MA
355 * @filp: pointer to struct file
356 * @vma: pointer to vm_area_struct
d1bb8321
AC
357 *
358 * Called by the kernel when the daemon attempts an mmap() syscall
359 * using our handle.
4856ab33
MA
360 */
361static int sep_request_daemon_mmap(struct file *filp,
362 struct vm_area_struct *vma)
363{
4856ab33 364 struct sep_device *sep = filp->private_data;
4856ab33 365 dma_addr_t bus_address;
4856ab33
MA
366 int error = 0;
367
4856ab33
MA
368 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
369 error = -EINVAL;
370 goto end_function;
371 }
372
6eb44c53 373 /* Get physical address */
4856ab33
MA
374 bus_address = sep->shared_bus;
375
4856ab33
MA
376 if (remap_pfn_range(vma, vma->vm_start, bus_address >> PAGE_SHIFT,
377 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
378
da14e551 379 dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
4856ab33
MA
380 error = -EAGAIN;
381 goto end_function;
382 }
383
384end_function:
4856ab33
MA
385 return error;
386}
387
388/**
d1bb8321 389 * sep_request_daemon_poll - poll implementation
6eb44c53 390 * @sep: struct sep_device * for current SEP device
4856ab33
MA
391 * @filp: struct file * for open file
392 * @wait: poll_table * for poll
d1bb8321
AC
393 *
394 * Called when our device is part of a poll() or select() syscall
4856ab33
MA
395 */
396static unsigned int sep_request_daemon_poll(struct file *filp,
397 poll_table *wait)
398{
399 u32 mask = 0;
4856ab33
MA
400 /* GPR2 register */
401 u32 retval2;
4856ab33 402 unsigned long lck_flags;
4856ab33
MA
403 struct sep_device *sep = filp->private_data;
404
4856ab33
MA
405 poll_wait(filp, &sep->event_request_daemon, wait);
406
da14e551
AC
407 dev_dbg(&sep->pdev->dev, "daemon poll: send_ct is %lx reply ct is %lx\n",
408 sep->send_ct, sep->reply_ct);
4856ab33
MA
409
410 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
6eb44c53 411 /* Check if the data is ready */
4856ab33 412 if (sep->send_ct == sep->reply_ct) {
4856ab33
MA
413 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
414
415 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4856ab33
MA
416 dev_dbg(&sep->pdev->dev,
417 "daemon poll: data check (GPR2) is %x\n", retval2);
418
6eb44c53 419 /* Check if PRINT request */
4856ab33 420 if ((retval2 >> 30) & 0x1) {
da14e551 421 dev_dbg(&sep->pdev->dev, "daemon poll: PRINTF request in\n");
4856ab33
MA
422 mask |= POLLIN;
423 goto end_function;
424 }
6eb44c53 425 /* Check if NVS request */
4856ab33 426 if (retval2 >> 31) {
da14e551 427 dev_dbg(&sep->pdev->dev, "daemon poll: NVS request in\n");
4856ab33
MA
428 mask |= POLLPRI | POLLWRNORM;
429 }
da14e551 430 } else {
4856ab33 431 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
4856ab33
MA
432 dev_dbg(&sep->pdev->dev,
433 "daemon poll: no reply received; returning 0\n");
434 mask = 0;
435 }
4856ab33 436end_function:
4856ab33
MA
437 return mask;
438}
439
440/**
441 * sep_release - close a SEP device
442 * @inode: inode of SEP device
443 * @filp: file handle being closed
444 *
445 * Called on the final close of a SEP device.
446 */
4856ab33
MA
447static int sep_release(struct inode *inode, struct file *filp)
448{
449 struct sep_device *sep = filp->private_data;
450
451 dev_dbg(&sep->pdev->dev, "Release for pid %d\n", current->pid);
452
453 mutex_lock(&sep->sep_mutex);
6eb44c53 454 /* Is this the process that has a transaction open?
4856ab33
MA
455 * If so, lets reset pid_doing_transaction to 0 and
456 * clear the in use flags, and then wake up sep_event
457 * so that other processes can do transactions
458 */
4856ab33
MA
459 if (sep->pid_doing_transaction == current->pid) {
460 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
461 clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
462 sep_free_dma_table_data_handler(sep);
463 wake_up(&sep->event);
464 sep->pid_doing_transaction = 0;
465 }
466
467 mutex_unlock(&sep->sep_mutex);
468 return 0;
469}
470
471/**
472 * sep_mmap - maps the shared area to user space
473 * @filp: pointer to struct file
474 * @vma: pointer to vm_area_struct
d1bb8321 475 *
6eb44c53 476 * Called on an mmap of our space via the normal SEP device
4856ab33
MA
477 */
478static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
479{
480 dma_addr_t bus_addr;
481 struct sep_device *sep = filp->private_data;
482 unsigned long error = 0;
483
4856ab33
MA
484 /* Set the transaction busy (own the device) */
485 wait_event_interruptible(sep->event,
486 test_and_set_bit(SEP_MMAP_LOCK_BIT,
487 &sep->in_use_flags) == 0);
488
489 if (signal_pending(current)) {
490 error = -EINTR;
491 goto end_function_with_error;
492 }
493 /*
494 * The pid_doing_transaction indicates that this process
495 * now owns the facilities to performa a transaction with
6eb44c53
AC
496 * the SEP. While this process is performing a transaction,
497 * no other process who has the SEP device open can perform
4856ab33
MA
498 * any transactions. This method allows more than one process
499 * to have the device open at any given time, which provides
500 * finer granularity for device utilization by multiple
501 * processes.
502 */
503 mutex_lock(&sep->sep_mutex);
504 sep->pid_doing_transaction = current->pid;
505 mutex_unlock(&sep->sep_mutex);
506
6eb44c53 507 /* Zero the pools and the number of data pool alocation pointers */
4856ab33
MA
508 sep->data_pool_bytes_allocated = 0;
509 sep->num_of_data_allocations = 0;
510
511 /*
6eb44c53 512 * Check that the size of the mapped range is as the size of the message
4856ab33
MA
513 * shared area
514 */
515 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
516 error = -EINVAL;
517 goto end_function_with_error;
518 }
519
da14e551 520 dev_dbg(&sep->pdev->dev, "shared_addr is %p\n", sep->shared_addr);
4856ab33 521
6eb44c53 522 /* Get bus address */
4856ab33
MA
523 bus_addr = sep->shared_bus;
524
4856ab33
MA
525 if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
526 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
da14e551 527 dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
4856ab33
MA
528 error = -EAGAIN;
529 goto end_function_with_error;
530 }
4856ab33
MA
531 goto end_function;
532
533end_function_with_error:
6eb44c53 534 /* Clear the bit */
4856ab33
MA
535 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
536 mutex_lock(&sep->sep_mutex);
537 sep->pid_doing_transaction = 0;
538 mutex_unlock(&sep->sep_mutex);
539
6eb44c53 540 /* Raise event for stuck contextes */
4856ab33 541
4856ab33
MA
542 wake_up(&sep->event);
543
544end_function:
4856ab33
MA
545 return error;
546}
547
548/**
d1bb8321 549 * sep_poll - poll handler
4856ab33
MA
550 * @filp: pointer to struct file
551 * @wait: pointer to poll_table
d1bb8321
AC
552 *
553 * Called by the OS when the kernel is asked to do a poll on
554 * a SEP file handle.
4856ab33
MA
555 */
556static unsigned int sep_poll(struct file *filp, poll_table *wait)
557{
558 u32 mask = 0;
559 u32 retval = 0;
560 u32 retval2 = 0;
4856ab33
MA
561 unsigned long lck_flags;
562
563 struct sep_device *sep = filp->private_data;
564
d1bb8321 565 /* Am I the process that owns the transaction? */
4856ab33
MA
566 mutex_lock(&sep->sep_mutex);
567 if (current->pid != sep->pid_doing_transaction) {
836aded1 568 dev_dbg(&sep->pdev->dev, "poll; wrong pid\n");
4856ab33
MA
569 mask = POLLERR;
570 mutex_unlock(&sep->sep_mutex);
571 goto end_function;
572 }
4856ab33
MA
573 mutex_unlock(&sep->sep_mutex);
574
6eb44c53 575 /* Check if send command or send_reply were activated previously */
4856ab33 576 if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
4856ab33
MA
577 mask = POLLERR;
578 goto end_function;
579 }
580
6eb44c53 581 /* Add the event to the polling wait table */
4856ab33
MA
582 dev_dbg(&sep->pdev->dev, "poll: calling wait sep_event\n");
583
584 poll_wait(filp, &sep->event, wait);
585
4856ab33
MA
586 dev_dbg(&sep->pdev->dev, "poll: send_ct is %lx reply ct is %lx\n",
587 sep->send_ct, sep->reply_ct);
588
25985edc 589 /* Check if error occurred during poll */
4856ab33 590 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
4856ab33 591 if (retval2 != 0x0) {
da14e551 592 dev_warn(&sep->pdev->dev, "poll; poll error %x\n", retval2);
4856ab33
MA
593 mask |= POLLERR;
594 goto end_function;
595 }
596
597 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
598
599 if (sep->send_ct == sep->reply_ct) {
4856ab33
MA
600 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
601 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
602 dev_dbg(&sep->pdev->dev, "poll: data ready check (GPR2) %x\n",
603 retval);
604
6eb44c53 605 /* Check if printf request */
4856ab33 606 if ((retval >> 30) & 0x1) {
6eb44c53 607 dev_dbg(&sep->pdev->dev, "poll: SEP printf request\n");
4856ab33
MA
608 wake_up(&sep->event_request_daemon);
609 goto end_function;
4856ab33
MA
610 }
611
6eb44c53 612 /* Check if the this is SEP reply or request */
4856ab33 613 if (retval >> 31) {
6eb44c53 614 dev_dbg(&sep->pdev->dev, "poll: SEP request\n");
4856ab33 615 wake_up(&sep->event_request_daemon);
4856ab33 616 } else {
4856ab33 617 dev_dbg(&sep->pdev->dev, "poll: normal return\n");
6eb44c53 618 /* In case it is again by send_reply_comand */
4856ab33 619 clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
4856ab33 620 sep_dump_message(sep);
4856ab33 621 dev_dbg(&sep->pdev->dev,
6eb44c53 622 "poll; SEP reply POLLIN | POLLRDNORM\n");
4856ab33
MA
623 mask |= POLLIN | POLLRDNORM;
624 }
4856ab33 625 } else {
4856ab33 626 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
4856ab33
MA
627 dev_dbg(&sep->pdev->dev,
628 "poll; no reply received; returning mask of 0\n");
629 mask = 0;
630 }
631
632end_function:
4856ab33
MA
633 return mask;
634}
635
636/**
637 * sep_time_address - address in SEP memory of time
638 * @sep: SEP device we want the address from
639 *
640 * Return the address of the two dwords in memory used for time
641 * setting.
642 */
4856ab33
MA
643static u32 *sep_time_address(struct sep_device *sep)
644{
645 return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
646}
647
648/**
649 * sep_set_time - set the SEP time
650 * @sep: the SEP we are setting the time for
651 *
652 * Calculates time and sets it at the predefined address.
6eb44c53 653 * Called with the SEP mutex held.
4856ab33
MA
654 */
655static unsigned long sep_set_time(struct sep_device *sep)
656{
657 struct timeval time;
6eb44c53 658 u32 *time_addr; /* Address of time as seen by the kernel */
4856ab33
MA
659
660
4856ab33
MA
661 do_gettimeofday(&time);
662
6eb44c53 663 /* Set value in the SYSTEM MEMORY offset */
4856ab33
MA
664 time_addr = sep_time_address(sep);
665
666 time_addr[0] = SEP_TIME_VAL_TOKEN;
667 time_addr[1] = time.tv_sec;
668
da14e551
AC
669 dev_dbg(&sep->pdev->dev, "time.tv_sec is %lu\n", time.tv_sec);
670 dev_dbg(&sep->pdev->dev, "time_addr is %p\n", time_addr);
671 dev_dbg(&sep->pdev->dev, "sep->shared_addr is %p\n", sep->shared_addr);
4856ab33
MA
672
673 return time.tv_sec;
674}
675
4856ab33 676/**
d1bb8321 677 * sep_set_caller_id_handler - insert caller id entry
6eb44c53 678 * @sep: SEP device
4856ab33 679 * @arg: pointer to struct caller_id_struct
d1bb8321
AC
680 *
681 * Inserts the data into the caller id table. Note that this function
682 * falls under the ioctl lock
4856ab33 683 */
c100fa4d 684static int sep_set_caller_id_handler(struct sep_device *sep, unsigned long arg)
4856ab33
MA
685{
686 void __user *hash;
da14e551 687 int error = 0;
4856ab33
MA
688 int i;
689 struct caller_id_struct command_args;
690
4856ab33
MA
691 for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
692 if (sep->caller_id_table[i].pid == 0)
693 break;
694 }
695
696 if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) {
836aded1
AC
697 dev_dbg(&sep->pdev->dev, "no more caller id entries left\n");
698 dev_dbg(&sep->pdev->dev, "maximum number is %d\n",
da14e551 699 SEP_CALLER_ID_TABLE_NUM_ENTRIES);
4856ab33
MA
700 error = -EUSERS;
701 goto end_function;
702 }
703
6eb44c53 704 /* Copy the data */
4856ab33
MA
705 if (copy_from_user(&command_args, (void __user *)arg,
706 sizeof(command_args))) {
707 error = -EFAULT;
708 goto end_function;
709 }
710
711 hash = (void __user *)(unsigned long)command_args.callerIdAddress;
712
713 if (!command_args.pid || !command_args.callerIdSizeInBytes) {
714 error = -EINVAL;
715 goto end_function;
716 }
717
718 dev_dbg(&sep->pdev->dev, "pid is %x\n", command_args.pid);
719 dev_dbg(&sep->pdev->dev, "callerIdSizeInBytes is %x\n",
720 command_args.callerIdSizeInBytes);
721
722 if (command_args.callerIdSizeInBytes >
da14e551 723 SEP_CALLER_ID_HASH_SIZE_IN_BYTES) {
4856ab33
MA
724 error = -EMSGSIZE;
725 goto end_function;
726 }
727
728 sep->caller_id_table[i].pid = command_args.pid;
729
730 if (copy_from_user(sep->caller_id_table[i].callerIdHash,
731 hash, command_args.callerIdSizeInBytes))
732 error = -EFAULT;
733end_function:
4856ab33
MA
734 return error;
735}
736
737/**
d1bb8321 738 * sep_set_current_caller_id - set the caller id
6eb44c53 739 * @sep: pointer to struct_sep_device
d1bb8321 740 *
6eb44c53 741 * Set the caller ID (if it exists) to the SEP. Note that this
d1bb8321 742 * function falls under the ioctl lock
4856ab33
MA
743 */
744static int sep_set_current_caller_id(struct sep_device *sep)
745{
746 int i;
653bf0cf 747 u32 *hash_buf_ptr;
4856ab33 748
6eb44c53 749 /* Zero the previous value */
da14e551
AC
750 memset(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
751 0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
4856ab33
MA
752
753 for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
754 if (sep->caller_id_table[i].pid == current->pid) {
755 dev_dbg(&sep->pdev->dev, "Caller Id found\n");
756
da14e551 757 memcpy(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
4856ab33
MA
758 (void *)(sep->caller_id_table[i].callerIdHash),
759 SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
760 break;
761 }
762 }
653bf0cf
MA
763 /* Ensure data is in little endian */
764 hash_buf_ptr = (u32 *)sep->shared_addr +
765 SEP_CALLER_ID_OFFSET_BYTES;
766
767 for (i = 0; i < SEP_CALLER_ID_HASH_SIZE_IN_WORDS; i++)
768 hash_buf_ptr[i] = cpu_to_le32(hash_buf_ptr[i]);
769
4856ab33
MA
770 return 0;
771}
772
773/**
774 * sep_send_command_handler - kick off a command
6eb44c53 775 * @sep: SEP being signalled
d1bb8321 776 *
4856ab33
MA
777 * This function raises interrupt to SEP that signals that is has a new
778 * command from the host
d1bb8321
AC
779 *
780 * Note that this function does fall under the ioctl lock
4856ab33 781 */
4856ab33
MA
782static int sep_send_command_handler(struct sep_device *sep)
783{
784 unsigned long lck_flags;
785 int error = 0;
786
4856ab33
MA
787 if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
788 error = -EPROTO;
789 goto end_function;
790 }
4856ab33
MA
791 sep_set_time(sep);
792
0dd12c44 793 sep_set_current_caller_id(sep);
4856ab33
MA
794
795 sep_dump_message(sep);
796
6eb44c53 797 /* Update counter */
4856ab33
MA
798 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
799 sep->send_ct++;
800 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
801
da14e551
AC
802 dev_dbg(&sep->pdev->dev,
803 "sep_send_command_handler send_ct %lx reply_ct %lx\n",
804 sep->send_ct, sep->reply_ct);
4856ab33 805
6eb44c53 806 /* Send interrupt to SEP */
4856ab33
MA
807 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
808
809end_function:
4856ab33
MA
810 return error;
811}
812
813/**
d1bb8321 814 * sep_allocate_data_pool_memory_handler -allocate pool memory
6eb44c53 815 * @sep: pointer to struct sep_device
d1bb8321
AC
816 * @arg: pointer to struct alloc_struct
817 *
4856ab33
MA
818 * This function handles the allocate data pool memory request
819 * This function returns calculates the bus address of the
820 * allocated memory, and the offset of this area from the mapped address.
821 * Therefore, the FVOs in user space can calculate the exact virtual
822 * address of this allocated memory
4856ab33
MA
823 */
824static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
825 unsigned long arg)
826{
827 int error = 0;
828 struct alloc_struct command_args;
829
d1bb8321 830 /* Holds the allocated buffer address in the system memory pool */
4856ab33
MA
831 u32 *token_addr;
832
4856ab33
MA
833 if (copy_from_user(&command_args, (void __user *)arg,
834 sizeof(struct alloc_struct))) {
835 error = -EFAULT;
836 goto end_function;
837 }
838
d1bb8321 839 /* Allocate memory */
4856ab33
MA
840 if ((sep->data_pool_bytes_allocated + command_args.num_bytes) >
841 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
842 error = -ENOMEM;
843 goto end_function;
844 }
845
846 dev_dbg(&sep->pdev->dev,
dfcfc166 847 "data pool bytes_allocated: %x\n", (int)sep->data_pool_bytes_allocated);
4856ab33
MA
848 dev_dbg(&sep->pdev->dev,
849 "offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES);
d1bb8321 850 /* Set the virtual and bus address */
4856ab33
MA
851 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
852 sep->data_pool_bytes_allocated;
853
6eb44c53 854 /* Place in the shared area that is known by the SEP */
4856ab33
MA
855 token_addr = (u32 *)(sep->shared_addr +
856 SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES +
857 (sep->num_of_data_allocations)*2*sizeof(u32));
858
4856ab33
MA
859 token_addr[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN;
860 token_addr[1] = (u32)sep->shared_bus +
861 SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
862 sep->data_pool_bytes_allocated;
863
d1bb8321 864 /* Write the memory back to the user space */
4856ab33
MA
865 error = copy_to_user((void *)arg, (void *)&command_args,
866 sizeof(struct alloc_struct));
867 if (error) {
da14e551 868 error = -EFAULT;
4856ab33
MA
869 goto end_function;
870 }
871
6eb44c53 872 /* Update the allocation */
4856ab33
MA
873 sep->data_pool_bytes_allocated += command_args.num_bytes;
874 sep->num_of_data_allocations += 1;
875
4856ab33 876end_function:
4856ab33
MA
877 return error;
878}
879
880/**
d1bb8321 881 * sep_lock_kernel_pages - map kernel pages for DMA
4856ab33
MA
882 * @sep: pointer to struct sep_device
883 * @kernel_virt_addr: address of data buffer in kernel
884 * @data_size: size of data
885 * @lli_array_ptr: lli array
886 * @in_out_flag: input into device or output from device
d1bb8321
AC
887 *
888 * This function locks all the physical pages of the kernel virtual buffer
889 * and construct a basic lli array, where each entry holds the physical
890 * page address and the size that application data holds in this page
891 * This function is used only during kernel crypto mod calls from within
892 * the kernel (when ioctl is not used)
4856ab33
MA
893 */
894static int sep_lock_kernel_pages(struct sep_device *sep,
c100fa4d 895 unsigned long kernel_virt_addr,
4856ab33
MA
896 u32 data_size,
897 struct sep_lli_entry **lli_array_ptr,
898 int in_out_flag)
899
900{
4856ab33 901 int error = 0;
6eb44c53 902 /* Array of lli */
4856ab33 903 struct sep_lli_entry *lli_array;
6eb44c53 904 /* Map array */
4856ab33
MA
905 struct sep_dma_map *map_array;
906
dfcfc166 907 dev_dbg(&sep->pdev->dev, "lock kernel pages kernel_virt_addr is %08lx\n",
c100fa4d 908 (unsigned long)kernel_virt_addr);
6f89be93 909 dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
4856ab33
MA
910
911 lli_array = kmalloc(sizeof(struct sep_lli_entry), GFP_ATOMIC);
912 if (!lli_array) {
4856ab33
MA
913 error = -ENOMEM;
914 goto end_function;
915 }
4856ab33
MA
916 map_array = kmalloc(sizeof(struct sep_dma_map), GFP_ATOMIC);
917 if (!map_array) {
918 error = -ENOMEM;
919 goto end_function_with_error;
920 }
921
922 map_array[0].dma_addr =
923 dma_map_single(&sep->pdev->dev, (void *)kernel_virt_addr,
924 data_size, DMA_BIDIRECTIONAL);
925 map_array[0].size = data_size;
926
927
928 /*
6eb44c53 929 * Set the start address of the first page - app data may start not at
4856ab33
MA
930 * the beginning of the page
931 */
932 lli_array[0].bus_address = (u32)map_array[0].dma_addr;
933 lli_array[0].block_size = map_array[0].size;
934
935 dev_dbg(&sep->pdev->dev,
da14e551 936 "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
4856ab33
MA
937 (unsigned long)lli_array[0].bus_address,
938 lli_array[0].block_size);
939
6eb44c53 940 /* Set the output parameters */
4856ab33
MA
941 if (in_out_flag == SEP_DRIVER_IN_FLAG) {
942 *lli_array_ptr = lli_array;
943 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 1;
dda16b23 944 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
4856ab33
MA
945 sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
946 sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1;
947 } else {
948 *lli_array_ptr = lli_array;
949 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = 1;
dda16b23 950 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
4856ab33
MA
951 sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
952 sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1;
953 }
4856ab33
MA
954 goto end_function;
955
956end_function_with_error:
4856ab33
MA
957 kfree(lli_array);
958
959end_function:
4856ab33
MA
960 return error;
961}
962
963/**
d1bb8321 964 * sep_lock_user_pages - lock and map user pages for DMA
4856ab33
MA
965 * @sep: pointer to struct sep_device
966 * @app_virt_addr: user memory data buffer
967 * @data_size: size of data buffer
968 * @lli_array_ptr: lli array
969 * @in_out_flag: input or output to device
d1bb8321
AC
970 *
971 * This function locks all the physical pages of the application
972 * virtual buffer and construct a basic lli array, where each entry
973 * holds the physical page address and the size that application
974 * data holds in this physical pages
4856ab33
MA
975 */
976static int sep_lock_user_pages(struct sep_device *sep,
977 u32 app_virt_addr,
978 u32 data_size,
979 struct sep_lli_entry **lli_array_ptr,
980 int in_out_flag)
981
982{
da14e551
AC
983 int error = 0;
984 u32 count;
985 int result;
6eb44c53 986 /* The the page of the end address of the user space buffer */
4856ab33 987 u32 end_page;
6eb44c53 988 /* The page of the start address of the user space buffer */
4856ab33 989 u32 start_page;
6eb44c53 990 /* The range in pages */
4856ab33 991 u32 num_pages;
6eb44c53 992 /* Array of pointers to page */
4856ab33 993 struct page **page_array;
6eb44c53 994 /* Array of lli */
4856ab33 995 struct sep_lli_entry *lli_array;
6eb44c53 996 /* Map array */
4856ab33 997 struct sep_dma_map *map_array;
6eb44c53 998 /* Direction of the DMA mapping for locked pages */
4856ab33
MA
999 enum dma_data_direction dir;
1000
6eb44c53 1001 /* Set start and end pages and num pages */
4856ab33
MA
1002 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1003 start_page = app_virt_addr >> PAGE_SHIFT;
1004 num_pages = end_page - start_page + 1;
1005
dfcfc166 1006 dev_dbg(&sep->pdev->dev, "lock user pages app_virt_addr is %x\n", app_virt_addr);
da14e551
AC
1007 dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
1008 dev_dbg(&sep->pdev->dev, "start_page is %x\n", start_page);
1009 dev_dbg(&sep->pdev->dev, "end_page is %x\n", end_page);
1010 dev_dbg(&sep->pdev->dev, "num_pages is %x\n", num_pages);
4856ab33 1011
6eb44c53 1012 /* Allocate array of pages structure pointers */
4856ab33
MA
1013 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
1014 if (!page_array) {
1015 error = -ENOMEM;
1016 goto end_function;
1017 }
4856ab33
MA
1018 map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
1019 if (!map_array) {
da14e551 1020 dev_warn(&sep->pdev->dev, "kmalloc for map_array failed\n");
4856ab33
MA
1021 error = -ENOMEM;
1022 goto end_function_with_error1;
1023 }
1024
1025 lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
1026 GFP_ATOMIC);
1027
1028 if (!lli_array) {
da14e551 1029 dev_warn(&sep->pdev->dev, "kmalloc for lli_array failed\n");
4856ab33
MA
1030 error = -ENOMEM;
1031 goto end_function_with_error2;
1032 }
1033
6eb44c53 1034 /* Convert the application virtual address into a set of physical */
4856ab33
MA
1035 down_read(&current->mm->mmap_sem);
1036 result = get_user_pages(current, current->mm, app_virt_addr,
1037 num_pages,
1038 ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
dda16b23 1039 0, page_array, NULL);
4856ab33
MA
1040
1041 up_read(&current->mm->mmap_sem);
1042
6eb44c53 1043 /* Check the number of pages locked - if not all then exit with error */
4856ab33 1044 if (result != num_pages) {
4856ab33
MA
1045 dev_warn(&sep->pdev->dev,
1046 "not all pages locked by get_user_pages\n");
1047 error = -ENOMEM;
1048 goto end_function_with_error3;
1049 }
1050
da14e551 1051 dev_dbg(&sep->pdev->dev, "get_user_pages succeeded\n");
4856ab33 1052
6eb44c53 1053 /* Set direction */
4856ab33
MA
1054 if (in_out_flag == SEP_DRIVER_IN_FLAG)
1055 dir = DMA_TO_DEVICE;
1056 else
1057 dir = DMA_FROM_DEVICE;
1058
1059 /*
6eb44c53
AC
1060 * Fill the array using page array data and
1061 * map the pages - this action will also flush the cache as needed
4856ab33
MA
1062 */
1063 for (count = 0; count < num_pages; count++) {
6eb44c53 1064 /* Fill the map array */
4856ab33
MA
1065 map_array[count].dma_addr =
1066 dma_map_page(&sep->pdev->dev, page_array[count],
1067 0, PAGE_SIZE, /*dir*/DMA_BIDIRECTIONAL);
1068
1069 map_array[count].size = PAGE_SIZE;
1070
6eb44c53 1071 /* Fill the lli array entry */
4856ab33
MA
1072 lli_array[count].bus_address = (u32)map_array[count].dma_addr;
1073 lli_array[count].block_size = PAGE_SIZE;
1074
da14e551 1075 dev_warn(&sep->pdev->dev, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
4856ab33
MA
1076 count, (unsigned long)lli_array[count].bus_address,
1077 count, lli_array[count].block_size);
1078 }
1079
6eb44c53 1080 /* Check the offset for the first page */
4856ab33
MA
1081 lli_array[0].bus_address =
1082 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1083
6eb44c53 1084 /* Check that not all the data is in the first page only */
4856ab33
MA
1085 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1086 lli_array[0].block_size = data_size;
1087 else
1088 lli_array[0].block_size =
1089 PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1090
1091 dev_dbg(&sep->pdev->dev,
da14e551 1092 "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
4856ab33
MA
1093 (unsigned long)lli_array[count].bus_address,
1094 lli_array[count].block_size);
1095
6eb44c53 1096 /* Check the size of the last page */
4856ab33 1097 if (num_pages > 1) {
4856ab33
MA
1098 lli_array[num_pages - 1].block_size =
1099 (app_virt_addr + data_size) & (~PAGE_MASK);
1100
1101 dev_warn(&sep->pdev->dev,
da14e551 1102 "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
4856ab33
MA
1103 num_pages - 1,
1104 (unsigned long)lli_array[count].bus_address,
1105 num_pages - 1,
1106 lli_array[count].block_size);
1107 }
1108
25985edc 1109 /* Set output params according to the in_out flag */
4856ab33
MA
1110 if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1111 *lli_array_ptr = lli_array;
da14e551
AC
1112 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = num_pages;
1113 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = page_array;
1114 sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
4856ab33 1115 sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries =
da14e551 1116 num_pages;
4856ab33
MA
1117 } else {
1118 *lli_array_ptr = lli_array;
da14e551 1119 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = num_pages;
4856ab33 1120 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array =
da14e551
AC
1121 page_array;
1122 sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
4856ab33 1123 sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries =
da14e551 1124 num_pages;
4856ab33 1125 }
4856ab33
MA
1126 goto end_function;
1127
1128end_function_with_error3:
6eb44c53 1129 /* Free lli array */
4856ab33
MA
1130 kfree(lli_array);
1131
1132end_function_with_error2:
4856ab33
MA
1133 kfree(map_array);
1134
1135end_function_with_error1:
6eb44c53 1136 /* Free page array */
4856ab33
MA
1137 kfree(page_array);
1138
1139end_function:
4856ab33
MA
1140 return error;
1141}
1142
1143/**
d1bb8321 1144 * u32 sep_calculate_lli_table_max_size - size the LLI table
4856ab33
MA
1145 * @sep: pointer to struct sep_device
1146 * @lli_in_array_ptr
1147 * @num_array_entries
1148 * @last_table_flag
d1bb8321
AC
1149 *
1150 * This function calculates the size of data that can be inserted into
1151 * the lli table from this array, such that either the table is full
1152 * (all entries are entered), or there are no more entries in the
1153 * lli array
4856ab33
MA
1154 */
1155static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
1156 struct sep_lli_entry *lli_in_array_ptr,
1157 u32 num_array_entries,
1158 u32 *last_table_flag)
1159{
da14e551 1160 u32 counter;
6eb44c53 1161 /* Table data size */
da14e551 1162 u32 table_data_size = 0;
6eb44c53 1163 /* Data size for the next table */
4856ab33
MA
1164 u32 next_table_data_size;
1165
4856ab33
MA
1166 *last_table_flag = 0;
1167
1168 /*
6eb44c53 1169 * Calculate the data in the out lli table till we fill the whole
4856ab33
MA
1170 * table or till the data has ended
1171 */
1172 for (counter = 0;
1173 (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
da14e551 1174 (counter < num_array_entries); counter++)
4856ab33
MA
1175 table_data_size += lli_in_array_ptr[counter].block_size;
1176
1177 /*
6eb44c53 1178 * Check if we reached the last entry,
4856ab33
MA
1179 * meaning this ia the last table to build,
1180 * and no need to check the block alignment
1181 */
1182 if (counter == num_array_entries) {
6eb44c53 1183 /* Set the last table flag */
4856ab33
MA
1184 *last_table_flag = 1;
1185 goto end_function;
1186 }
1187
1188 /*
6eb44c53
AC
1189 * Calculate the data size of the next table.
1190 * Stop if no entries left or if data size is more the DMA restriction
4856ab33
MA
1191 */
1192 next_table_data_size = 0;
1193 for (; counter < num_array_entries; counter++) {
4856ab33 1194 next_table_data_size += lli_in_array_ptr[counter].block_size;
4856ab33 1195 if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
4856ab33
MA
1196 break;
1197 }
1198
1199 /*
6eb44c53 1200 * Check if the next table data size is less then DMA rstriction.
4856ab33
MA
1201 * if it is - recalculate the current table size, so that the next
1202 * table data size will be adaquete for DMA
1203 */
1204 if (next_table_data_size &&
1205 next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1206
1207 table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
1208 next_table_data_size);
1209
4856ab33 1210end_function:
4856ab33
MA
1211 return table_data_size;
1212}
1213
1214/**
d1bb8321 1215 * sep_build_lli_table - build an lli array for the given table
4856ab33
MA
1216 * @sep: pointer to struct sep_device
1217 * @lli_array_ptr: pointer to lli array
1218 * @lli_table_ptr: pointer to lli table
1219 * @num_processed_entries_ptr: pointer to number of entries
1220 * @num_table_entries_ptr: pointer to number of tables
1221 * @table_data_size: total data size
d1bb8321
AC
1222 *
1223 * Builds ant lli table from the lli_array according to
1224 * the given size of data
4856ab33
MA
1225 */
1226static void sep_build_lli_table(struct sep_device *sep,
1227 struct sep_lli_entry *lli_array_ptr,
1228 struct sep_lli_entry *lli_table_ptr,
1229 u32 *num_processed_entries_ptr,
1230 u32 *num_table_entries_ptr,
1231 u32 table_data_size)
1232{
6eb44c53 1233 /* Current table data size */
4856ab33 1234 u32 curr_table_data_size;
6eb44c53 1235 /* Counter of lli array entry */
4856ab33
MA
1236 u32 array_counter;
1237
6eb44c53 1238 /* Init currrent table data size and lli array entry counter */
4856ab33
MA
1239 curr_table_data_size = 0;
1240 array_counter = 0;
1241 *num_table_entries_ptr = 1;
1242
dfcfc166 1243 dev_dbg(&sep->pdev->dev, "build lli table table_data_size is %x\n", table_data_size);
4856ab33 1244
6eb44c53 1245 /* Fill the table till table size reaches the needed amount */
4856ab33 1246 while (curr_table_data_size < table_data_size) {
6eb44c53 1247 /* Update the number of entries in table */
4856ab33
MA
1248 (*num_table_entries_ptr)++;
1249
1250 lli_table_ptr->bus_address =
1251 cpu_to_le32(lli_array_ptr[array_counter].bus_address);
1252
1253 lli_table_ptr->block_size =
1254 cpu_to_le32(lli_array_ptr[array_counter].block_size);
1255
1256 curr_table_data_size += lli_array_ptr[array_counter].block_size;
1257
da14e551
AC
1258 dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n",
1259 lli_table_ptr);
1260 dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
1261 (unsigned long)lli_table_ptr->bus_address);
1262 dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
4856ab33
MA
1263 lli_table_ptr->block_size);
1264
6eb44c53 1265 /* Check for overflow of the table data */
4856ab33 1266 if (curr_table_data_size > table_data_size) {
4856ab33
MA
1267 dev_dbg(&sep->pdev->dev,
1268 "curr_table_data_size too large\n");
1269
6eb44c53 1270 /* Update the size of block in the table */
4856ab33
MA
1271 lli_table_ptr->block_size -=
1272 cpu_to_le32((curr_table_data_size - table_data_size));
1273
6eb44c53 1274 /* Update the physical address in the lli array */
4856ab33
MA
1275 lli_array_ptr[array_counter].bus_address +=
1276 cpu_to_le32(lli_table_ptr->block_size);
1277
6eb44c53 1278 /* Update the block size left in the lli array */
4856ab33
MA
1279 lli_array_ptr[array_counter].block_size =
1280 (curr_table_data_size - table_data_size);
4856ab33 1281 } else
6eb44c53 1282 /* Advance to the next entry in the lli_array */
4856ab33
MA
1283 array_counter++;
1284
1285 dev_dbg(&sep->pdev->dev,
1286 "lli_table_ptr->bus_address is %08lx\n",
da14e551 1287 (unsigned long)lli_table_ptr->bus_address);
4856ab33
MA
1288 dev_dbg(&sep->pdev->dev,
1289 "lli_table_ptr->block_size is %x\n",
1290 lli_table_ptr->block_size);
1291
6eb44c53 1292 /* Move to the next entry in table */
4856ab33
MA
1293 lli_table_ptr++;
1294 }
1295
6eb44c53 1296 /* Set the info entry to default */
4856ab33
MA
1297 lli_table_ptr->bus_address = 0xffffffff;
1298 lli_table_ptr->block_size = 0;
1299
6eb44c53 1300 /* Set the output parameter */
4856ab33
MA
1301 *num_processed_entries_ptr += array_counter;
1302
4856ab33
MA
1303}
1304
1305/**
d1bb8321
AC
1306 * sep_shared_area_virt_to_bus - map shared area to bus address
1307 * @sep: pointer to struct sep_device
1308 * @virt_address: virtual address to convert
1309 *
4856ab33
MA
1310 * This functions returns the physical address inside shared area according
1311 * to the virtual address. It can be either on the externa RAM device
1312 * (ioremapped), or on the system RAM
1313 * This implementation is for the external RAM
4856ab33
MA
1314 */
1315static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
1316 void *virt_address)
1317{
da14e551
AC
1318 dev_dbg(&sep->pdev->dev, "sh virt to phys v %p\n", virt_address);
1319 dev_dbg(&sep->pdev->dev, "sh virt to phys p %08lx\n",
1320 (unsigned long)
1321 sep->shared_bus + (virt_address - sep->shared_addr));
4856ab33 1322
da14e551 1323 return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
4856ab33
MA
1324}
1325
1326/**
d1bb8321
AC
1327 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
1328 * @sep: pointer to struct sep_device
1329 * @bus_address: bus address to convert
1330 *
4856ab33
MA
1331 * This functions returns the virtual address inside shared area
1332 * according to the physical address. It can be either on the
1333 * externa RAM device (ioremapped), or on the system RAM
1334 * This implementation is for the external RAM
4856ab33 1335 */
4856ab33
MA
1336static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
1337 dma_addr_t bus_address)
1338{
c100fa4d
MA
1339 dev_dbg(&sep->pdev->dev, "shared bus to virt b=%lx v=%lx\n",
1340 (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
4856ab33
MA
1341 (size_t)(bus_address - sep->shared_bus)));
1342
da14e551 1343 return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
4856ab33
MA
1344}
1345
1346/**
d1bb8321 1347 * sep_debug_print_lli_tables - dump LLI table
4856ab33
MA
1348 * @sep: pointer to struct sep_device
1349 * @lli_table_ptr: pointer to sep_lli_entry
1350 * @num_table_entries: number of entries
1351 * @table_data_size: total data size
d1bb8321
AC
1352 *
1353 * Walk the the list of the print created tables and print all the data
4856ab33
MA
1354 */
1355static void sep_debug_print_lli_tables(struct sep_device *sep,
1356 struct sep_lli_entry *lli_table_ptr,
1357 unsigned long num_table_entries,
1358 unsigned long table_data_size)
1359{
da14e551 1360 unsigned long table_count = 1;
4856ab33
MA
1361 unsigned long entries_count = 0;
1362
da14e551 1363 dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables start\n");
4856ab33 1364
c100fa4d 1365 while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
4856ab33
MA
1366 dev_dbg(&sep->pdev->dev,
1367 "lli table %08lx, table_data_size is %lu\n",
1368 table_count, table_data_size);
da14e551
AC
1369 dev_dbg(&sep->pdev->dev, "num_table_entries is %lu\n",
1370 num_table_entries);
4856ab33 1371
6eb44c53 1372 /* Print entries of the table (without info entry) */
4856ab33
MA
1373 for (entries_count = 0; entries_count < num_table_entries;
1374 entries_count++, lli_table_ptr++) {
1375
1376 dev_dbg(&sep->pdev->dev,
1377 "lli_table_ptr address is %08lx\n",
1378 (unsigned long) lli_table_ptr);
1379
1380 dev_dbg(&sep->pdev->dev,
1381 "phys address is %08lx block size is %x\n",
1382 (unsigned long)lli_table_ptr->bus_address,
1383 lli_table_ptr->block_size);
1384 }
6eb44c53 1385 /* Point to the info entry */
4856ab33
MA
1386 lli_table_ptr--;
1387
1388 dev_dbg(&sep->pdev->dev,
1389 "phys lli_table_ptr->block_size is %x\n",
1390 lli_table_ptr->block_size);
1391
1392 dev_dbg(&sep->pdev->dev,
1393 "phys lli_table_ptr->physical_address is %08lu\n",
1394 (unsigned long)lli_table_ptr->bus_address);
1395
1396
1397 table_data_size = lli_table_ptr->block_size & 0xffffff;
1398 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
4856ab33
MA
1399
1400 dev_dbg(&sep->pdev->dev,
1401 "phys table_data_size is %lu num_table_entries is"
c100fa4d
MA
1402 " %lu bus_address is%lu\n", table_data_size,
1403 num_table_entries, (unsigned long)lli_table_ptr->bus_address);
4856ab33 1404
c100fa4d 1405 if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
4856ab33
MA
1406 lli_table_ptr = (struct sep_lli_entry *)
1407 sep_shared_bus_to_virt(sep,
c100fa4d 1408 (unsigned long)lli_table_ptr->bus_address);
4856ab33
MA
1409
1410 table_count++;
1411 }
da14e551 1412 dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables end\n");
4856ab33
MA
1413}
1414
1415
1416/**
d1bb8321 1417 * sep_prepare_empty_lli_table - create a blank LLI table
4856ab33
MA
1418 * @sep: pointer to struct sep_device
1419 * @lli_table_addr_ptr: pointer to lli table
1420 * @num_entries_ptr: pointer to number of entries
1421 * @table_data_size_ptr: point to table data size
d1bb8321
AC
1422 *
1423 * This function creates empty lli tables when there is no data
4856ab33
MA
1424 */
1425static void sep_prepare_empty_lli_table(struct sep_device *sep,
1426 dma_addr_t *lli_table_addr_ptr,
1427 u32 *num_entries_ptr,
1428 u32 *table_data_size_ptr)
1429{
1430 struct sep_lli_entry *lli_table_ptr;
1431
6eb44c53 1432 /* Find the area for new table */
4856ab33
MA
1433 lli_table_ptr =
1434 (struct sep_lli_entry *)(sep->shared_addr +
1435 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
da14e551
AC
1436 sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1437 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
4856ab33
MA
1438
1439 lli_table_ptr->bus_address = 0;
1440 lli_table_ptr->block_size = 0;
1441
1442 lli_table_ptr++;
1443 lli_table_ptr->bus_address = 0xFFFFFFFF;
1444 lli_table_ptr->block_size = 0;
1445
6eb44c53 1446 /* Set the output parameter value */
4856ab33
MA
1447 *lli_table_addr_ptr = sep->shared_bus +
1448 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1449 sep->num_lli_tables_created *
1450 sizeof(struct sep_lli_entry) *
1451 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1452
6eb44c53 1453 /* Set the num of entries and table data size for empty table */
4856ab33
MA
1454 *num_entries_ptr = 2;
1455 *table_data_size_ptr = 0;
1456
6eb44c53 1457 /* Update the number of created tables */
4856ab33 1458 sep->num_lli_tables_created++;
4856ab33
MA
1459}
1460
1461/**
d1bb8321 1462 * sep_prepare_input_dma_table - prepare input DMA mappings
4856ab33
MA
1463 * @sep: pointer to struct sep_device
1464 * @data_size:
1465 * @block_size:
1466 * @lli_table_ptr:
1467 * @num_entries_ptr:
1468 * @table_data_size_ptr:
1469 * @is_kva: set for kernel data (kernel cryptio call)
d1bb8321
AC
1470 *
1471 * This function prepares only input DMA table for synhronic symmetric
1472 * operations (HASH)
6eb44c53 1473 * Note that all bus addresses that are passed to the SEP
d1bb8321 1474 * are in 32 bit format; the SEP is a 32 bit device
4856ab33
MA
1475 */
1476static int sep_prepare_input_dma_table(struct sep_device *sep,
1477 unsigned long app_virt_addr,
1478 u32 data_size,
1479 u32 block_size,
1480 dma_addr_t *lli_table_ptr,
1481 u32 *num_entries_ptr,
1482 u32 *table_data_size_ptr,
1483 bool is_kva)
1484{
da14e551 1485 int error = 0;
6eb44c53 1486 /* Pointer to the info entry of the table - the last entry */
4856ab33 1487 struct sep_lli_entry *info_entry_ptr;
6eb44c53 1488 /* Array of pointers to page */
4856ab33 1489 struct sep_lli_entry *lli_array_ptr;
6eb44c53 1490 /* Points to the first entry to be processed in the lli_in_array */
4856ab33 1491 u32 current_entry = 0;
6eb44c53 1492 /* Num entries in the virtual buffer */
4856ab33 1493 u32 sep_lli_entries = 0;
6eb44c53 1494 /* Lli table pointer */
4856ab33 1495 struct sep_lli_entry *in_lli_table_ptr;
6eb44c53 1496 /* The total data in one table */
4856ab33 1497 u32 table_data_size = 0;
6eb44c53 1498 /* Flag for last table */
4856ab33 1499 u32 last_table_flag = 0;
6eb44c53 1500 /* Number of entries in lli table */
4856ab33 1501 u32 num_entries_in_table = 0;
6eb44c53 1502 /* Next table address */
c100fa4d 1503 void *lli_table_alloc_addr = 0;
4856ab33 1504
dfcfc166 1505 dev_dbg(&sep->pdev->dev, "prepare intput dma table data_size is %x\n", data_size);
da14e551 1506 dev_dbg(&sep->pdev->dev, "block_size is %x\n", block_size);
4856ab33 1507
6eb44c53 1508 /* Initialize the pages pointers */
dda16b23 1509 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
4856ab33
MA
1510 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 0;
1511
6eb44c53 1512 /* Set the kernel address for first table to be allocated */
c100fa4d 1513 lli_table_alloc_addr = (void *)(sep->shared_addr +
4856ab33 1514 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
da14e551 1515 sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
4856ab33
MA
1516 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1517
1518 if (data_size == 0) {
6eb44c53 1519 /* Special case - create meptu table - 2 entries, zero data */
4856ab33 1520 sep_prepare_empty_lli_table(sep, lli_table_ptr,
da14e551 1521 num_entries_ptr, table_data_size_ptr);
4856ab33
MA
1522 goto update_dcb_counter;
1523 }
1524
6eb44c53 1525 /* Check if the pages are in Kernel Virtual Address layout */
4856ab33 1526 if (is_kva == true)
6eb44c53 1527 /* Lock the pages in the kernel */
4856ab33
MA
1528 error = sep_lock_kernel_pages(sep, app_virt_addr,
1529 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
4856ab33 1530 else
4856ab33 1531 /*
6eb44c53 1532 * Lock the pages of the user buffer
4856ab33
MA
1533 * and translate them to pages
1534 */
1535 error = sep_lock_user_pages(sep, app_virt_addr,
1536 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
1537
1538 if (error)
1539 goto end_function;
1540
da14e551 1541 dev_dbg(&sep->pdev->dev, "output sep_in_num_pages is %x\n",
4856ab33
MA
1542 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
1543
1544 current_entry = 0;
dda16b23 1545 info_entry_ptr = NULL;
4856ab33 1546
da14e551 1547 sep_lli_entries = sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages;
4856ab33 1548
6eb44c53 1549 /* Loop till all the entries in in array are not processed */
4856ab33
MA
1550 while (current_entry < sep_lli_entries) {
1551
6eb44c53 1552 /* Set the new input and output tables */
4856ab33
MA
1553 in_lli_table_ptr =
1554 (struct sep_lli_entry *)lli_table_alloc_addr;
1555
1556 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1557 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1558
1559 if (lli_table_alloc_addr >
c100fa4d 1560 ((void *)sep->shared_addr +
4856ab33
MA
1561 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1562 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
1563
1564 error = -ENOMEM;
1565 goto end_function_error;
1566
1567 }
1568
6eb44c53 1569 /* Update the number of created tables */
4856ab33
MA
1570 sep->num_lli_tables_created++;
1571
6eb44c53 1572 /* Calculate the maximum size of data for input table */
4856ab33
MA
1573 table_data_size = sep_calculate_lli_table_max_size(sep,
1574 &lli_array_ptr[current_entry],
1575 (sep_lli_entries - current_entry),
1576 &last_table_flag);
1577
da14e551 1578 /*
6eb44c53 1579 * If this is not the last table -
25985edc 1580 * then align it to the block size
4856ab33
MA
1581 */
1582 if (!last_table_flag)
1583 table_data_size =
da14e551 1584 (table_data_size / block_size) * block_size;
4856ab33 1585
da14e551
AC
1586 dev_dbg(&sep->pdev->dev, "output table_data_size is %x\n",
1587 table_data_size);
4856ab33 1588
6eb44c53 1589 /* Construct input lli table */
4856ab33
MA
1590 sep_build_lli_table(sep, &lli_array_ptr[current_entry],
1591 in_lli_table_ptr,
1592 &current_entry, &num_entries_in_table, table_data_size);
1593
dda16b23 1594 if (info_entry_ptr == NULL) {
4856ab33 1595
6eb44c53 1596 /* Set the output parameters to physical addresses */
4856ab33
MA
1597 *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
1598 in_lli_table_ptr);
1599 *num_entries_ptr = num_entries_in_table;
1600 *table_data_size_ptr = table_data_size;
1601
1602 dev_dbg(&sep->pdev->dev,
1603 "output lli_table_in_ptr is %08lx\n",
1604 (unsigned long)*lli_table_ptr);
1605
da14e551 1606 } else {
6eb44c53 1607 /* Update the info entry of the previous in table */
4856ab33
MA
1608 info_entry_ptr->bus_address =
1609 sep_shared_area_virt_to_bus(sep,
da14e551 1610 in_lli_table_ptr);
e957b063 1611 info_entry_ptr->block_size =
4856ab33
MA
1612 ((num_entries_in_table) << 24) |
1613 (table_data_size);
1614 }
6eb44c53 1615 /* Save the pointer to the info entry of the current tables */
4856ab33 1616 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
4856ab33 1617 }
6eb44c53 1618 /* Print input tables */
4856ab33
MA
1619 sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
1620 sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
da14e551 1621 *num_entries_ptr, *table_data_size_ptr);
6eb44c53 1622 /* The array of the pages */
4856ab33
MA
1623 kfree(lli_array_ptr);
1624
1625update_dcb_counter:
6eb44c53 1626 /* Update DCB counter */
4856ab33 1627 sep->nr_dcb_creat++;
4856ab33
MA
1628 goto end_function;
1629
1630end_function_error:
6eb44c53 1631 /* Free all the allocated resources */
4856ab33
MA
1632 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
1633 kfree(lli_array_ptr);
1634 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
1635
1636end_function:
4856ab33
MA
1637 return error;
1638
1639}
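/*
 * A minimal, userspace-compilable sketch of the info-entry packing used by
 * the table chaining above, assuming the layout implied by
 * ((num_entries << 24) | table_data_size): the last entry of each LLI table
 * points at the next table and carries that table's entry count in the top
 * byte and its data size in the low 24 bits. The struct and helper names
 * (and the explicit masking) are illustrative, not part of the driver.
 */
#include <stdint.h>

struct lli_entry_sketch {
	uint32_t bus_address;	/* data chunk, or bus address of the next table */
	uint32_t block_size;	/* info entry: (num_entries << 24) | data_size */
};

static inline uint32_t pack_info_block_size(uint32_t num_entries,
					    uint32_t data_size)
{
	return (num_entries << 24) | (data_size & 0x00ffffff);
}

static inline uint32_t info_num_entries(uint32_t packed)
{
	return packed >> 24;
}

static inline uint32_t info_data_size(uint32_t packed)
{
	return packed & 0x00ffffff;
}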
1640/**
d1bb8321 1641 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
6eb44c53 1642 * @sep: pointer to struct sep_device
4856ab33
MA
1643 * @lli_in_array: array of LLI entries for the input pages
1644 * @sep_in_lli_entries: number of entries in lli_in_array
1645 * @lli_out_array: array of LLI entries for the output pages
1646 * @sep_out_lli_entries: number of entries in lli_out_array
1647 * @block_size: block size of the symmetric operation
1648 * @lli_table_in_ptr: returns the bus address of the first input DMA table
1649 * @lli_table_out_ptr: returns the bus address of the first output DMA table
1650 * @in_num_entries_ptr: returns the number of entries in the first input table
1651 * @out_num_entries_ptr: returns the number of entries in the first output table
1652 * @table_data_size_ptr: returns the data size of the first table
d1bb8321 1653 *
6eb44c53 1654 * This function creates the input and output DMA tables for
d1bb8321
AC
1655 * symmetric operations (AES/DES) according to the block
1656 * size from LLI arrays
6eb44c53 1657 * Note that all bus addresses that are passed to the SEP
d1bb8321 1658 * are in 32 bit format; the SEP is a 32 bit device
4856ab33
MA
1659 */
1660static int sep_construct_dma_tables_from_lli(
1661 struct sep_device *sep,
1662 struct sep_lli_entry *lli_in_array,
1663 u32 sep_in_lli_entries,
1664 struct sep_lli_entry *lli_out_array,
1665 u32 sep_out_lli_entries,
1666 u32 block_size,
1667 dma_addr_t *lli_table_in_ptr,
1668 dma_addr_t *lli_table_out_ptr,
1669 u32 *in_num_entries_ptr,
1670 u32 *out_num_entries_ptr,
1671 u32 *table_data_size_ptr)
1672{
6eb44c53 1673 /* Points to the area where next lli table can be allocated */
c100fa4d 1674 void *lli_table_alloc_addr = 0;
6eb44c53 1675 /* Input lli table */
dda16b23 1676 struct sep_lli_entry *in_lli_table_ptr = NULL;
6eb44c53 1677 /* Output lli table */
dda16b23 1678 struct sep_lli_entry *out_lli_table_ptr = NULL;
6eb44c53 1679 /* Pointer to the info entry of the table - the last entry */
dda16b23 1680 struct sep_lli_entry *info_in_entry_ptr = NULL;
6eb44c53 1681 /* Pointer to the info entry of the table - the last entry */
dda16b23 1682 struct sep_lli_entry *info_out_entry_ptr = NULL;
6eb44c53 1683 /* Points to the first entry to be processed in the lli_in_array */
4856ab33 1684 u32 current_in_entry = 0;
6eb44c53 1685 /* Points to the first entry to be processed in the lli_out_array */
4856ab33 1686 u32 current_out_entry = 0;
6eb44c53 1687 /* Max size of the input table */
4856ab33 1688 u32 in_table_data_size = 0;
6eb44c53 1689 /* Max size of the output table */
4856ab33 1690 u32 out_table_data_size = 0;
6eb44c53 1691 /* Flag that signifies if this is the last table built */
4856ab33 1692 u32 last_table_flag = 0;
6eb44c53 1693 /* The data size that should be in table */
4856ab33 1694 u32 table_data_size = 0;
6eb44c53 1695 /* Number of entries in the input table */
4856ab33 1696 u32 num_entries_in_table = 0;
6eb44c53 1697 /* Number of entries in the output table */
4856ab33
MA
1698 u32 num_entries_out_table = 0;
1699
6eb44c53 1700 /* Initiate to point after the message area */
c100fa4d 1701 lli_table_alloc_addr = (void *)(sep->shared_addr +
4856ab33
MA
1702 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1703 (sep->num_lli_tables_created *
1704 (sizeof(struct sep_lli_entry) *
1705 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
1706
6eb44c53 1707 /* Loop until all the entries in the input array have been processed */
4856ab33 1708 while (current_in_entry < sep_in_lli_entries) {
6eb44c53 1709 /* Set the new input and output tables */
4856ab33
MA
1710 in_lli_table_ptr =
1711 (struct sep_lli_entry *)lli_table_alloc_addr;
1712
1713 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1714 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1715
6eb44c53 1716 /* Set the first output tables */
4856ab33
MA
1717 out_lli_table_ptr =
1718 (struct sep_lli_entry *)lli_table_alloc_addr;
1719
6eb44c53 1720 /* Check if the DMA table area limit was overrun */
4856ab33
MA
1721 if ((lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
1722 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
c100fa4d 1723 ((void *)sep->shared_addr +
4856ab33
MA
1724 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1725 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
1726
da14e551 1727 dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
4856ab33
MA
1728 return -ENOMEM;
1729 }
1730
6eb44c53 1731 /* Update the number of the lli tables created */
4856ab33
MA
1732 sep->num_lli_tables_created += 2;
1733
1734 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1735 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1736
6eb44c53 1737 /* Calculate the maximum size of data for input table */
4856ab33
MA
1738 in_table_data_size =
1739 sep_calculate_lli_table_max_size(sep,
1740 &lli_in_array[current_in_entry],
1741 (sep_in_lli_entries - current_in_entry),
1742 &last_table_flag);
1743
6eb44c53 1744 /* Calculate the maximum size of data for output table */
4856ab33
MA
1745 out_table_data_size =
1746 sep_calculate_lli_table_max_size(sep,
1747 &lli_out_array[current_out_entry],
1748 (sep_out_lli_entries - current_out_entry),
1749 &last_table_flag);
1750
1751 dev_dbg(&sep->pdev->dev,
dfcfc166 1752 "construct tables from lli in_table_data_size is %x\n",
4856ab33
MA
1753 in_table_data_size);
1754
1755 dev_dbg(&sep->pdev->dev,
dfcfc166 1756 "construct tables from lli out_table_data_size is %x\n",
4856ab33
MA
1757 out_table_data_size);
1758
1759 table_data_size = in_table_data_size;
1760
1761 if (!last_table_flag) {
1762 /*
6eb44c53 1763 * If this is not the last table,
4856ab33
MA
1764 * then take the smaller of the input and output sizes
1765 * and align it to the block size
1766 */
1767 if (table_data_size > out_table_data_size)
1768 table_data_size = out_table_data_size;
1769
1770 /*
6eb44c53 1771 * Now calculate the table size so that
4856ab33
MA
1772 * it will be a multiple of the block size
1773 */
1774 table_data_size = (table_data_size / block_size) *
1775 block_size;
1776 }
1777
6eb44c53 1778 /* Construct input lli table */
4856ab33
MA
1779 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
1780 in_lli_table_ptr,
1781 &current_in_entry,
1782 &num_entries_in_table,
1783 table_data_size);
1784
6eb44c53 1785 /* Construct output lli table */
4856ab33
MA
1786 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
1787 out_lli_table_ptr,
1788 &current_out_entry,
1789 &num_entries_out_table,
1790 table_data_size);
1791
6eb44c53 1792 /* If info entry is null - this is the first table built */
dda16b23 1793 if (info_in_entry_ptr == NULL) {
6eb44c53 1794 /* Set the output parameters to physical addresses */
4856ab33
MA
1795 *lli_table_in_ptr =
1796 sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1797
1798 *in_num_entries_ptr = num_entries_in_table;
1799
1800 *lli_table_out_ptr =
1801 sep_shared_area_virt_to_bus(sep,
1802 out_lli_table_ptr);
1803
1804 *out_num_entries_ptr = num_entries_out_table;
1805 *table_data_size_ptr = table_data_size;
1806
1807 dev_dbg(&sep->pdev->dev,
1808 "output lli_table_in_ptr is %08lx\n",
1809 (unsigned long)*lli_table_in_ptr);
1810 dev_dbg(&sep->pdev->dev,
1811 "output lli_table_out_ptr is %08lx\n",
1812 (unsigned long)*lli_table_out_ptr);
1813 } else {
6eb44c53 1814 /* Update the info entry of the previous in table */
4856ab33
MA
1815 info_in_entry_ptr->bus_address =
1816 sep_shared_area_virt_to_bus(sep,
1817 in_lli_table_ptr);
1818
1819 info_in_entry_ptr->block_size =
1820 ((num_entries_in_table) << 24) |
1821 (table_data_size);
1822
6eb44c53 1823 /* Update the info entry of the previous out table */
4856ab33
MA
1824 info_out_entry_ptr->bus_address =
1825 sep_shared_area_virt_to_bus(sep,
1826 out_lli_table_ptr);
1827
1828 info_out_entry_ptr->block_size =
1829 ((num_entries_out_table) << 24) |
1830 (table_data_size);
1831
1832 dev_dbg(&sep->pdev->dev,
1833 "output lli_table_in_ptr:%08lx %08x\n",
1834 (unsigned long)info_in_entry_ptr->bus_address,
1835 info_in_entry_ptr->block_size);
1836
1837 dev_dbg(&sep->pdev->dev,
1838 "output lli_table_out_ptr:%08lx %08x\n",
1839 (unsigned long)info_out_entry_ptr->bus_address,
1840 info_out_entry_ptr->block_size);
1841 }
1842
6eb44c53 1843 /* Save the pointer to the info entry of the current tables */
4856ab33
MA
1844 info_in_entry_ptr = in_lli_table_ptr +
1845 num_entries_in_table - 1;
1846 info_out_entry_ptr = out_lli_table_ptr +
1847 num_entries_out_table - 1;
1848
1849 dev_dbg(&sep->pdev->dev,
1850 "output num_entries_out_table is %x\n",
1851 (u32)num_entries_out_table);
1852 dev_dbg(&sep->pdev->dev,
1853 "output info_in_entry_ptr is %lx\n",
1854 (unsigned long)info_in_entry_ptr);
1855 dev_dbg(&sep->pdev->dev,
1856 "output info_out_entry_ptr is %lx\n",
1857 (unsigned long)info_out_entry_ptr);
1858 }
1859
6eb44c53 1860 /* Print input tables */
4856ab33
MA
1861 sep_debug_print_lli_tables(sep,
1862 (struct sep_lli_entry *)
1863 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
1864 *in_num_entries_ptr,
1865 *table_data_size_ptr);
1866
6eb44c53 1867 /* Print output tables */
4856ab33
MA
1868 sep_debug_print_lli_tables(sep,
1869 (struct sep_lli_entry *)
1870 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
1871 *out_num_entries_ptr,
1872 *table_data_size_ptr);
1873
4856ab33
MA
1874 return 0;
1875}
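/*
 * A minimal sketch of the size negotiation performed in the loop above:
 * unless this is the last pair of tables, the common table size is the
 * smaller of the input and output capacities, rounded down to a whole
 * number of cipher blocks. The helper name is illustrative and block_size
 * is assumed to be non-zero.
 */
#include <stdint.h>

static uint32_t common_table_data_size(uint32_t in_size, uint32_t out_size,
				       uint32_t block_size, int last_table)
{
	uint32_t size = in_size;

	if (!last_table) {
		if (size > out_size)
			size = out_size;
		/* Round down so the table carries whole blocks only */
		size = (size / block_size) * block_size;
	}

	return size;
}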
1876
1877/**
d1bb8321 1878 * sep_prepare_input_output_dma_table - prepare DMA I/O table
4856ab33
MA
1879 * @app_virt_in_addr: virtual address of the input buffer
1880 * @app_virt_out_addr: virtual address of the output buffer
1881 * @data_size: size of the data to process
1882 * @block_size: block size of the symmetric operation
1883 * @lli_table_in_ptr: returns the bus address of the first input DMA table
1884 * @lli_table_out_ptr: returns the bus address of the first output DMA table
1885 * @in_num_entries_ptr: returns the number of entries in the first input table
1886 * @out_num_entries_ptr: returns the number of entries in the first output table
1887 * @table_data_size_ptr: returns the data size of the first table
1888 * @is_kva: set for kernel data; used only for kernel crypto module
d1bb8321
AC
1889 *
1890 * This function builds input and output DMA tables for synchronous
1891 * symmetric operations (AES, DES, HASH). It also ensures that each
1892 * table's data size is a multiple of the block size
6eb44c53 1893 * Note that all bus addresses that are passed to the SEP
d1bb8321 1894 * are in 32 bit format; the SEP is a 32 bit device
4856ab33
MA
1895 */
1896static int sep_prepare_input_output_dma_table(struct sep_device *sep,
1897 unsigned long app_virt_in_addr,
1898 unsigned long app_virt_out_addr,
1899 u32 data_size,
1900 u32 block_size,
1901 dma_addr_t *lli_table_in_ptr,
1902 dma_addr_t *lli_table_out_ptr,
1903 u32 *in_num_entries_ptr,
1904 u32 *out_num_entries_ptr,
1905 u32 *table_data_size_ptr,
1906 bool is_kva)
1907
1908{
da14e551 1909 int error = 0;
6eb44c53 1910 /* Array of LLI entries for the input pages */
4856ab33 1911 struct sep_lli_entry *lli_in_array;
6eb44c53 1912 /* Array of LLI entries for the output pages */
4856ab33
MA
1913 struct sep_lli_entry *lli_out_array;
1914
4856ab33 1915 if (data_size == 0) {
6eb44c53 1916 /* Prepare empty table for input and output */
4856ab33
MA
1917 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
1918 in_num_entries_ptr, table_data_size_ptr);
1919
1920 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
1921 out_num_entries_ptr, table_data_size_ptr);
1922
1923 goto update_dcb_counter;
1924 }
1925
6eb44c53 1926 /* Initialize the pages pointers */
dda16b23
PH
1927 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
1928 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
4856ab33 1929
6eb44c53 1930 /* Lock the pages of the buffer and translate them to pages */
4856ab33
MA
1931 if (is_kva == true) {
1932 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
1933 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
1934
1935 if (error) {
1936 dev_warn(&sep->pdev->dev,
1937 "lock kernel for in failed\n");
1938 goto end_function;
1939 }
1940
1941 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
1942 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
1943
1944 if (error) {
1945 dev_warn(&sep->pdev->dev,
1946 "lock kernel for out failed\n");
1947 goto end_function;
1948 }
1949 }
1950
1951 else {
1952 error = sep_lock_user_pages(sep, app_virt_in_addr,
1953 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
1954 if (error) {
e957b063 1955 dev_warn(&sep->pdev->dev,
da14e551 1956 "sep_lock_user_pages for input virtual buffer failed\n");
4856ab33
MA
1957 goto end_function;
1958 }
1959
1960 error = sep_lock_user_pages(sep, app_virt_out_addr,
1961 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
1962
1963 if (error) {
da14e551
AC
1964 dev_warn(&sep->pdev->dev,
1965 "sep_lock_user_pages for output virtual buffer failed\n");
4856ab33
MA
1966 goto end_function_free_lli_in;
1967 }
1968 }
1969
dfcfc166 1970 dev_dbg(&sep->pdev->dev, "prep input output dma table sep_in_num_pages is %x\n",
4856ab33 1971 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
da14e551 1972 dev_dbg(&sep->pdev->dev, "sep_out_num_pages is %x\n",
4856ab33 1973 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages);
da14e551 1974 dev_dbg(&sep->pdev->dev, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
4856ab33
MA
1975 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1976
25985edc 1977 /* Call the function that creates table from the lli arrays */
4856ab33
MA
1978 error = sep_construct_dma_tables_from_lli(sep, lli_in_array,
1979 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages,
1980 lli_out_array,
1981 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages,
1982 block_size, lli_table_in_ptr, lli_table_out_ptr,
1983 in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
1984
1985 if (error) {
1986 dev_warn(&sep->pdev->dev,
1987 "sep_construct_dma_tables_from_lli failed\n");
1988 goto end_function_with_error;
1989 }
1990
1991 kfree(lli_out_array);
1992 kfree(lli_in_array);
1993
1994update_dcb_counter:
6eb44c53 1995 /* Update DCB counter */
4856ab33 1996 sep->nr_dcb_creat++;
4856ab33
MA
1997
1998 goto end_function;
1999
2000end_function_with_error:
4856ab33
MA
2001 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_map_array);
2002 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_page_array);
2003 kfree(lli_out_array);
2004
2005
2006end_function_free_lli_in:
4856ab33
MA
2007 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
2008 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
2009 kfree(lli_in_array);
2010
2011end_function:
4856ab33
MA
2012
2013 return error;
2014
2015}
2016
2017/**
d1bb8321 2018 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
4856ab33
MA
2019 * @app_in_address: unsigned long; for data buffer in (user space)
2020 * @app_out_address: unsigned long; for data buffer out (user space)
2021 * @data_in_size: u32; for size of data
2022 * @block_size: u32; for block size
2023 * @tail_block_size: u32; for size of tail block
2024 * @isapplet: bool; to indicate external app
2025 * @is_kva: bool; kernel buffer; only used for kernel crypto module
d1bb8321 2026 *
6eb44c53
AC
2027 * This function prepares the linked DMA tables and puts the
2028 * address for the linked list of tables into a DCB (data control
2029 * block), the address of which is known to the SEP hardware
2030 * Note that all bus addresses that are passed to the SEP
d1bb8321 2031 * are in 32 bit format; the SEP is a 32 bit device
4856ab33
MA
2032 */
2033static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
c100fa4d
MA
2034 unsigned long app_in_address,
2035 unsigned long app_out_address,
2036 u32 data_in_size,
2037 u32 block_size,
2038 u32 tail_block_size,
2039 bool isapplet,
2040 bool is_kva)
4856ab33 2041{
4856ab33 2042 int error = 0;
6eb44c53 2043 /* Size of tail */
4856ab33 2044 u32 tail_size = 0;
6eb44c53 2045 /* Address of the created DCB table */
dda16b23 2046 struct sep_dcblock *dcb_table_ptr = NULL;
6eb44c53 2047 /* The physical address of the first input DMA table */
4856ab33 2048 dma_addr_t in_first_mlli_address = 0;
6eb44c53 2049 /* Number of entries in the first input DMA table */
4856ab33 2050 u32 in_first_num_entries = 0;
6eb44c53 2051 /* The physical address of the first output DMA table */
4856ab33 2052 dma_addr_t out_first_mlli_address = 0;
6eb44c53 2053 /* Number of entries in the first output DMA table */
4856ab33 2054 u32 out_first_num_entries = 0;
6eb44c53 2055 /* Data in the first input/output table */
4856ab33
MA
2056 u32 first_data_size = 0;
2057
4856ab33 2058 if (sep->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
6eb44c53
AC
2059 /* No more DCBs to allocate */
2060 dev_warn(&sep->pdev->dev, "no more DCBs available\n");
4856ab33
MA
2061 error = -ENOSPC;
2062 goto end_function;
2063 }
2064
6eb44c53 2065 /* Allocate new DCB */
4856ab33
MA
2066 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2067 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2068 (sep->nr_dcb_creat * sizeof(struct sep_dcblock)));
2069
6eb44c53 2070 /* Set the default values in the DCB */
4856ab33
MA
2071 dcb_table_ptr->input_mlli_address = 0;
2072 dcb_table_ptr->input_mlli_num_entries = 0;
2073 dcb_table_ptr->input_mlli_data_size = 0;
2074 dcb_table_ptr->output_mlli_address = 0;
2075 dcb_table_ptr->output_mlli_num_entries = 0;
2076 dcb_table_ptr->output_mlli_data_size = 0;
2077 dcb_table_ptr->tail_data_size = 0;
2078 dcb_table_ptr->out_vr_tail_pt = 0;
2079
2080 if (isapplet == true) {
4856ab33 2081
6eb44c53 2082 /* Check if there is enough data for DMA operation */
4856ab33
MA
2083 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2084 if (is_kva == true) {
2085 memcpy(dcb_table_ptr->tail_data,
2086 (void *)app_in_address, data_in_size);
da14e551 2087 } else {
4856ab33
MA
2088 if (copy_from_user(dcb_table_ptr->tail_data,
2089 (void __user *)app_in_address,
2090 data_in_size)) {
2091 error = -EFAULT;
2092 goto end_function;
2093 }
2094 }
2095
2096 dcb_table_ptr->tail_data_size = data_in_size;
2097
6eb44c53 2098 /* Set the output user-space address for mem2mem op */
4856ab33
MA
2099 if (app_out_address)
2100 dcb_table_ptr->out_vr_tail_pt =
e508edb2 2101 (aligned_u64)app_out_address;
4856ab33
MA
2102
2103 /*
2104 * Update both data length parameters in order to avoid
2105 * second data copy and allow building of empty mlli
2106 * tables
2107 */
2108 tail_size = 0x0;
2109 data_in_size = 0x0;
e508edb2
MA
2110
2111 } else {
2112 if (!app_out_address) {
2113 tail_size = data_in_size % block_size;
2114 if (!tail_size) {
2115 if (tail_block_size == block_size)
2116 tail_size = block_size;
2117 }
2118 } else {
2119 tail_size = 0;
2120 }
4856ab33 2121 }
4856ab33
MA
2122 if (tail_size) {
2123 if (is_kva == true) {
2124 memcpy(dcb_table_ptr->tail_data,
2125 (void *)(app_in_address + data_in_size -
2126 tail_size), tail_size);
da14e551 2127 } else {
6eb44c53 2128 /* We have tail data - copy it to DCB */
4856ab33
MA
2129 if (copy_from_user(dcb_table_ptr->tail_data,
2130 (void *)(app_in_address +
2131 data_in_size - tail_size), tail_size)) {
2132 error = -EFAULT;
2133 goto end_function;
2134 }
2135 }
4856ab33
MA
2136 if (app_out_address)
2137 /*
da14e551 2138 * Calculate the output address
4856ab33
MA
2139 * according to tail data size
2140 */
2141 dcb_table_ptr->out_vr_tail_pt =
e508edb2 2142 (aligned_u64)app_out_address + data_in_size
4856ab33
MA
2143 - tail_size;
2144
da14e551 2145 /* Save the real tail data size */
4856ab33
MA
2146 dcb_table_ptr->tail_data_size = tail_size;
2147 /*
2148 * Update the data size without the tail
2149 * data size AKA data for the dma
2150 */
2151 data_in_size = (data_in_size - tail_size);
2152 }
2153 }
6eb44c53 2154 /* Check if we need to build only input table or input/output */
4856ab33 2155 if (app_out_address) {
6eb44c53 2156 /* Prepare input/output tables */
4856ab33
MA
2157 error = sep_prepare_input_output_dma_table(sep,
2158 app_in_address,
2159 app_out_address,
2160 data_in_size,
2161 block_size,
2162 &in_first_mlli_address,
2163 &out_first_mlli_address,
2164 &in_first_num_entries,
2165 &out_first_num_entries,
2166 &first_data_size,
2167 is_kva);
da14e551 2168 } else {
6eb44c53 2169 /* Prepare input tables */
4856ab33
MA
2170 error = sep_prepare_input_dma_table(sep,
2171 app_in_address,
2172 data_in_size,
2173 block_size,
2174 &in_first_mlli_address,
2175 &in_first_num_entries,
2176 &first_data_size,
2177 is_kva);
2178 }
2179
2180 if (error) {
6eb44c53 2181 dev_warn(&sep->pdev->dev, "prepare DMA table call failed from prepare DCB call\n");
4856ab33
MA
2182 goto end_function;
2183 }
2184
6eb44c53 2185 /* Set the DCB values */
4856ab33
MA
2186 dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2187 dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2188 dcb_table_ptr->input_mlli_data_size = first_data_size;
2189 dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2190 dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2191 dcb_table_ptr->output_mlli_data_size = first_data_size;
2192
2193end_function:
4856ab33
MA
2194 return error;
2195
2196}
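/*
 * A minimal sketch of the tail-size rule applied above for applet requests
 * of at least SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE bytes: with an output
 * buffer there is no tail; without one, the tail is the data size modulo
 * the block size, or one full block when the data is block aligned and
 * tail_block_size equals block_size. The helper name is illustrative.
 */
#include <stdint.h>

static uint32_t dcb_tail_size(uint32_t data_in_size, uint32_t block_size,
			      uint32_t tail_block_size, int has_output)
{
	uint32_t tail;

	if (has_output)
		return 0;

	tail = data_in_size % block_size;
	if (!tail && tail_block_size == block_size)
		tail = block_size;	/* keep one full block as tail data */

	return tail;
}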
2197
4856ab33 2198/**
d1bb8321 2199 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
4856ab33
MA
2200 * @sep: pointer to struct sep_device
2201 * @isapplet: indicates external application (used for kernel access)
2202 * @is_kva: indicates kernel addresses (only used for kernel crypto)
d1bb8321 2203 *
6eb44c53 2204 * This function frees the DMA tables and DCB
4856ab33
MA
2205 */
2206static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2207 bool is_kva)
2208{
4856ab33
MA
2209 int i = 0;
2210 int error = 0;
2211 int error_temp = 0;
4856ab33 2212 struct sep_dcblock *dcb_table_ptr;
c100fa4d
MA
2213 unsigned long pt_hold;
2214 void *tail_pt;
4856ab33 2215
4856ab33 2216 if (isapplet == true) {
6eb44c53 2217 /* Set pointer to first DCB table */
4856ab33
MA
2218 dcb_table_ptr = (struct sep_dcblock *)
2219 (sep->shared_addr +
2220 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2221
6eb44c53 2222 /* Go over each DCB and see if tail pointer must be updated */
4856ab33 2223 for (i = 0; i < sep->nr_dcb_creat; i++, dcb_table_ptr++) {
4856ab33 2224 if (dcb_table_ptr->out_vr_tail_pt) {
c100fa4d
MA
2225 pt_hold = (unsigned long)dcb_table_ptr->out_vr_tail_pt;
2226 tail_pt = (void *)pt_hold;
4856ab33 2227 if (is_kva == true) {
c100fa4d 2228 memcpy(tail_pt,
4856ab33
MA
2229 dcb_table_ptr->tail_data,
2230 dcb_table_ptr->tail_data_size);
da14e551 2231 } else {
4856ab33 2232 error_temp = copy_to_user(
c100fa4d 2233 tail_pt,
4856ab33
MA
2234 dcb_table_ptr->tail_data,
2235 dcb_table_ptr->tail_data_size);
2236 }
4856ab33 2237 if (error_temp) {
6eb44c53 2238 /* Release the DMA resource */
da14e551
AC
2239 error = -EFAULT;
2240 break;
4856ab33
MA
2241 }
2242 }
2243 }
2244 }
6eb44c53 2245 /* Free the output pages, if any */
4856ab33
MA
2246 sep_free_dma_table_data_handler(sep);
2247
4856ab33
MA
2248 return error;
2249}
2250
2251/**
d1bb8321 2252 * sep_get_static_pool_addr_handler - get static pool address
4856ab33 2253 * @sep: pointer to struct sep_device
d1bb8321
AC
2254 *
2255 * This function sets the bus and virtual addresses of the static pool
4856ab33 2256 */
f1566275 2257static int sep_get_static_pool_addr_handler(struct sep_device *sep)
4856ab33 2258{
dda16b23 2259 u32 *static_pool_addr = NULL;
4856ab33 2260
4856ab33
MA
2261 static_pool_addr = (u32 *)(sep->shared_addr +
2262 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2263
2264 static_pool_addr[0] = SEP_STATIC_POOL_VAL_TOKEN;
f1566275 2265 static_pool_addr[1] = (u32)sep->shared_bus +
4856ab33
MA
2266 SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2267
dfcfc166 2268 dev_dbg(&sep->pdev->dev, "static pool segment: physical %x\n",
f1566275 2269 (u32)static_pool_addr[1]);
4856ab33 2270
4856ab33
MA
2271 return 0;
2272}
2273
4856ab33 2274/**
d1bb8321 2275 * sep_end_transaction_handler - end transaction
4856ab33 2276 * @sep: pointer to struct sep_device
d1bb8321
AC
2277 *
2278 * This API handles the end transaction request
4856ab33
MA
2279 */
2280static int sep_end_transaction_handler(struct sep_device *sep)
2281{
6eb44c53 2282 /* Clear the data pool pointers Token */
4856ab33
MA
2283 memset((void *)(sep->shared_addr +
2284 SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES),
2285 0, sep->num_of_data_allocations*2*sizeof(u32));
2286
6eb44c53 2287 /* Check that all the DMA resources were freed */
4856ab33
MA
2288 sep_free_dma_table_data_handler(sep);
2289
2290 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
2291
2292 /*
6eb44c53 2293 * We are now through with the transaction. Let's
4856ab33
MA
2294 * allow other processes who have the device open
2295 * to perform transactions
2296 */
2297 mutex_lock(&sep->sep_mutex);
2298 sep->pid_doing_transaction = 0;
2299 mutex_unlock(&sep->sep_mutex);
6eb44c53 2300 /* Raise event for stuck contexts */
4856ab33
MA
2301 wake_up(&sep->event);
2302
4856ab33
MA
2303 return 0;
2304}
2305
2306/**
e957b063 2307 * sep_prepare_dcb_handler - prepare a control block
4856ab33
MA
2308 * @sep: pointer to struct sep_device
2309 * @arg: pointer to user parameters
d1bb8321
AC
2310 *
2311 * This function retrieves the DCB build arguments from user space and
2312 * prepares the DMA tables and data control block for the transaction.
4856ab33 2313 */
4856ab33
MA
2314static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg)
2315{
da14e551 2316 int error;
6eb44c53 2317 /* Command arguments */
4856ab33
MA
2318 struct build_dcb_struct command_args;
2319
da14e551 2320 /* Get the command arguments */
4856ab33 2321 if (copy_from_user(&command_args, (void __user *)arg,
da14e551 2322 sizeof(struct build_dcb_struct))) {
4856ab33
MA
2323 error = -EFAULT;
2324 goto end_function;
2325 }
2326
dfcfc166 2327 dev_dbg(&sep->pdev->dev, "prep dcb handler app_in_address is %08llx\n",
da14e551
AC
2328 command_args.app_in_address);
2329 dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
2330 command_args.app_out_address);
2331 dev_dbg(&sep->pdev->dev, "data_size is %x\n",
2332 command_args.data_in_size);
2333 dev_dbg(&sep->pdev->dev, "block_size is %x\n",
2334 command_args.block_size);
2335 dev_dbg(&sep->pdev->dev, "tail block_size is %x\n",
2336 command_args.tail_block_size);
4856ab33
MA
2337
2338 error = sep_prepare_input_output_dma_table_in_dcb(sep,
c100fa4d
MA
2339 (unsigned long)command_args.app_in_address,
2340 (unsigned long)command_args.app_out_address,
4856ab33
MA
2341 command_args.data_in_size, command_args.block_size,
2342 command_args.tail_block_size, true, false);
2343
2344end_function:
4856ab33
MA
2345 return error;
2346
2347}
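/*
 * A hedged user-space sketch of invoking SEP_IOCPREPAREDCB. The exact
 * layout of struct build_dcb_struct is an assumption; only the field names
 * used by the handler above (app_in_address, app_out_address, data_in_size,
 * block_size, tail_block_size) and the ioctl name are taken from the
 * driver, with sep_driver_api.h assumed to export them to user space.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include "sep_driver_api.h"	/* assumed to define SEP_IOCPREPAREDCB etc. */

static int prepare_aes_dcb(int sep_fd, void *in_buf, void *out_buf,
			   uint32_t len)
{
	struct build_dcb_struct args;

	memset(&args, 0, sizeof(args));
	args.app_in_address = (uintptr_t)in_buf;
	args.app_out_address = (uintptr_t)out_buf;
	args.data_in_size = len;
	args.block_size = 16;		/* AES block size */
	args.tail_block_size = 16;

	return ioctl(sep_fd, SEP_IOCPREPAREDCB, &args);
}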
2348
2349/**
d1bb8321 2350 * sep_free_dcb_handler - free control block resources
4856ab33 2351 * @sep: pointer to struct sep_device
d1bb8321
AC
2352 *
2353 * This function frees the DCB resources and updates the needed
2354 * user-space buffers.
4856ab33
MA
2355 */
2356static int sep_free_dcb_handler(struct sep_device *sep)
2357{
836aded1 2358 return sep_free_dma_tables_and_dcb(sep, false, false);
4856ab33
MA
2359}
2360
2361/**
e957b063 2362 * sep_rar_prepare_output_msg_handler - prepare an output message
4856ab33
MA
2363 * @sep: pointer to struct sep_device
2364 * @arg: pointer to user parameters
d1bb8321
AC
2365 *
2366 * This function will retrieve the RAR buffer physical addresses, type
2367 * & size corresponding to the RAR handles provided in the buffers vector.
4856ab33 2368 */
4856ab33
MA
2369static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
2370 unsigned long arg)
2371{
4856ab33 2372 int error = 0;
6eb44c53 2373 /* Command args */
4856ab33
MA
2374 struct rar_hndl_to_bus_struct command_args;
2375 struct RAR_buffer rar_buf;
6eb44c53 2376 /* Bus address */
4856ab33 2377 dma_addr_t rar_bus = 0;
6eb44c53 2378 /* Holds the RAR address in the system memory offset */
4856ab33
MA
2379 u32 *rar_addr;
2380
6eb44c53 2381 /* Copy the data */
da14e551
AC
2382 if (copy_from_user(&command_args, (void __user *)arg,
2383 sizeof(command_args))) {
4856ab33
MA
2384 error = -EFAULT;
2385 goto end_function;
2386 }
2387
6eb44c53 2388 /* Call to translation function only if user handle is not NULL */
4856ab33 2389 if (command_args.rar_handle) {
4856ab33
MA
2390 memset(&rar_buf, 0, sizeof(rar_buf));
2391 rar_buf.info.handle = (u32)command_args.rar_handle;
2392
2393 if (rar_handle_to_bus(&rar_buf, 1) != 1) {
4856ab33
MA
2394 error = -EFAULT;
2395 goto end_function;
2396 }
4856ab33
MA
2397 rar_bus = rar_buf.bus_address;
2398 }
da14e551 2399 dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus);
4856ab33 2400
6eb44c53 2401 /* Set value in the SYSTEM MEMORY offset */
4856ab33
MA
2402 rar_addr = (u32 *)(sep->shared_addr +
2403 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2404
6eb44c53 2405 /* Copy the physical address to the System Area for the SEP */
4856ab33
MA
2406 rar_addr[0] = SEP_RAR_VAL_TOKEN;
2407 rar_addr[1] = rar_bus;
2408
2409end_function:
4856ab33
MA
2410 return error;
2411}
2412
4856ab33
MA
2413/**
2414 * sep_ioctl - ioctl api
2415 * @filp: pointer to struct file
2416 * @cmd: command
2417 * @arg: pointer to argument structure
d1bb8321 2418 *
25985edc 2419 * Implement the ioctl methods available on the SEP device.
4856ab33
MA
2420 */
2421static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2422{
2423 int error = 0;
2424 struct sep_device *sep = filp->private_data;
2425
6eb44c53 2426 /* Make sure we own this device */
4856ab33
MA
2427 mutex_lock(&sep->sep_mutex);
2428 if ((current->pid != sep->pid_doing_transaction) &&
da14e551 2429 (sep->pid_doing_transaction != 0)) {
836aded1 2430 dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n");
4856ab33
MA
2431 error = -EACCES;
2432 goto end_function;
2433 }
2434
2435 mutex_unlock(&sep->sep_mutex);
2436
836aded1
AC
2437 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2438 return -ENOTTY;
4856ab33 2439
6eb44c53 2440 /* Lock to prevent the daemon to interfere with operation */
4856ab33
MA
2441 mutex_lock(&sep->ioctl_mutex);
2442
2443 switch (cmd) {
2444 case SEP_IOCSENDSEPCOMMAND:
6eb44c53 2445 /* Send command to SEP */
4856ab33
MA
2446 error = sep_send_command_handler(sep);
2447 break;
2448 case SEP_IOCALLOCDATAPOLL:
6eb44c53 2449 /* Allocate data pool */
4856ab33
MA
2450 error = sep_allocate_data_pool_memory_handler(sep, arg);
2451 break;
4856ab33 2452 case SEP_IOCGETSTATICPOOLADDR:
bc657f6e 2453 /* Inform the SEP the bus address of the static pool */
f1566275 2454 error = sep_get_static_pool_addr_handler(sep);
4856ab33
MA
2455 break;
2456 case SEP_IOCENDTRANSACTION:
2457 error = sep_end_transaction_handler(sep);
2458 break;
4856ab33
MA
2459 case SEP_IOCRARPREPAREMESSAGE:
2460 error = sep_rar_prepare_output_msg_handler(sep, arg);
2461 break;
2462 case SEP_IOCPREPAREDCB:
2463 error = sep_prepare_dcb_handler(sep, arg);
2464 break;
2465 case SEP_IOCFREEDCB:
2466 error = sep_free_dcb_handler(sep);
2467 break;
2468 default:
4856ab33
MA
2469 error = -ENOTTY;
2470 break;
2471 }
4856ab33
MA
2472
2473end_function:
836aded1 2474 mutex_unlock(&sep->ioctl_mutex);
4856ab33
MA
2475 return error;
2476}
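/*
 * A hedged sketch of one synchronous transaction as the ioctl set above
 * suggests: send the command mailbox, wait for the SEP reply via poll(),
 * then free the DCB and end the transaction. The message protocol itself is
 * not shown; only the ioctl names come from the switch above, and
 * sep_driver_api.h is assumed to export the SEP_IOC* numbers to user space.
 */
#include <poll.h>
#include <sys/ioctl.h>

#include "sep_driver_api.h"	/* assumed to define the SEP_IOC* numbers */

static int run_sep_transaction(int sep_fd)
{
	struct pollfd pfd = { .fd = sep_fd, .events = POLLIN };
	int ret;

	ret = ioctl(sep_fd, SEP_IOCSENDSEPCOMMAND);
	if (ret)
		return ret;

	/* Block until the interrupt handler wakes the reply wait queue */
	ret = poll(&pfd, 1, -1);
	if (ret < 0)
		return ret;

	ioctl(sep_fd, SEP_IOCFREEDCB);
	return ioctl(sep_fd, SEP_IOCENDTRANSACTION);
}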
2477
2478/**
2479 * sep_singleton_ioctl - ioctl api for singleton interface
2480 * @filp: pointer to struct file
2481 * @cmd: command
2482 * @arg: pointer to argument structure
d1bb8321
AC
2483 *
2484 * Implement the additional ioctls for the singleton device
4856ab33
MA
2485 */
2486static long sep_singleton_ioctl(struct file *filp, u32 cmd, unsigned long arg)
2487{
da14e551 2488 long error = 0;
4856ab33
MA
2489 struct sep_device *sep = filp->private_data;
2490
6eb44c53 2491 /* Check that the command is for the SEP device */
836aded1
AC
2492 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2493 return -ENOTTY;
4856ab33 2494
6eb44c53 2495 /* Make sure we own this device */
4856ab33
MA
2496 mutex_lock(&sep->sep_mutex);
2497 if ((current->pid != sep->pid_doing_transaction) &&
da14e551 2498 (sep->pid_doing_transaction != 0)) {
836aded1 2499 dev_dbg(&sep->pdev->dev, "singleton ioctl pid is not owner\n");
4856ab33 2500 mutex_unlock(&sep->sep_mutex);
836aded1 2501 return -EACCES;
4856ab33
MA
2502 }
2503
2504 mutex_unlock(&sep->sep_mutex);
2505
2506 switch (cmd) {
4856ab33
MA
2507 case SEP_IOCTLSETCALLERID:
2508 mutex_lock(&sep->ioctl_mutex);
2509 error = sep_set_caller_id_handler(sep, arg);
2510 mutex_unlock(&sep->ioctl_mutex);
2511 break;
4856ab33
MA
2512 default:
2513 error = sep_ioctl(filp, cmd, arg);
2514 break;
4856ab33 2515 }
4856ab33
MA
2516 return error;
2517}
2518
2519/**
2520 * sep_request_daemon_ioctl - ioctl for daemon
2521 * @filp: pointer to struct file
2522 * @cmd: command
2523 * @arg: pointer to argument structure
d1bb8321 2524 *
4856ab33
MA
2525 * Called by the request daemon to perform ioctls on the daemon device
2526 */
4856ab33
MA
2527static long sep_request_daemon_ioctl(struct file *filp, u32 cmd,
2528 unsigned long arg)
2529{
2530
2531 long error;
4856ab33
MA
2532 struct sep_device *sep = filp->private_data;
2533
6eb44c53 2534 /* Check that the command is for SEP device */
836aded1
AC
2535 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2536 return -ENOTTY;
4856ab33 2537
6eb44c53 2538 /* Only one process can access ioctl at any given time */
4856ab33
MA
2539 mutex_lock(&sep->ioctl_mutex);
2540
2541 switch (cmd) {
2542 case SEP_IOCSENDSEPRPLYCOMMAND:
6eb44c53 2543 /* Send reply command to SEP */
4856ab33
MA
2544 error = sep_req_daemon_send_reply_command_handler(sep);
2545 break;
4856ab33 2546 case SEP_IOCENDTRANSACTION:
4856ab33 2547 /*
6eb44c53 2548 * End req daemon transaction, do nothing
4856ab33
MA
2549 * will be removed upon update in middleware
2550 * API library
2551 */
2552 error = 0;
2553 break;
4856ab33 2554 default:
4856ab33
MA
2555 error = -ENOTTY;
2556 }
4856ab33 2557 mutex_unlock(&sep->ioctl_mutex);
4856ab33 2558 return error;
4856ab33
MA
2559}
2560
2561/**
6eb44c53 2562 * sep_inthandler - interrupt handler
4856ab33
MA
2563 * @irq: interrupt
2564 * @dev_id: device id
2565 */
4856ab33
MA
2566static irqreturn_t sep_inthandler(int irq, void *dev_id)
2567{
2568 irqreturn_t int_error = IRQ_HANDLED;
2569 unsigned long lck_flags;
2570 u32 reg_val, reg_val2 = 0;
2571 struct sep_device *sep = dev_id;
2572
6eb44c53 2573 /* Read the IRR register to check if this is SEP interrupt */
4856ab33 2574 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
4856ab33
MA
2575
2576 if (reg_val & (0x1 << 13)) {
6eb44c53 2577 /* Lock and update the counter of reply messages */
4856ab33
MA
2578 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
2579 sep->reply_ct++;
2580 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
2581
2582 dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
da14e551 2583 sep->send_ct, sep->reply_ct);
4856ab33 2584
6eb44c53 2585 /* Is this printf or daemon request? */
4856ab33
MA
2586 reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
2587 dev_dbg(&sep->pdev->dev,
2588 "SEP Interrupt - reg2 is %08x\n", reg_val2);
2589
2590 if ((reg_val2 >> 30) & 0x1) {
4856ab33
MA
2591 dev_dbg(&sep->pdev->dev, "int: printf request\n");
2592 wake_up(&sep->event_request_daemon);
da14e551 2593 } else if (reg_val2 >> 31) {
4856ab33
MA
2594 dev_dbg(&sep->pdev->dev, "int: daemon request\n");
2595 wake_up(&sep->event_request_daemon);
2596 } else {
6eb44c53 2597 dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
4856ab33
MA
2598 wake_up(&sep->event);
2599 }
4856ab33 2600 } else {
6eb44c53 2601 dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
4856ab33
MA
2602 int_error = IRQ_NONE;
2603 }
4856ab33
MA
2604 if (int_error == IRQ_HANDLED)
2605 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
2606
2607 return int_error;
2608}
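/*
 * A minimal sketch of how the handler above classifies the GPR2 value read
 * after a SEP interrupt: bit 30 marks a printf request, bit 31 a daemon
 * request, and anything else is an ordinary SEP reply. The enum and helper
 * names are illustrative.
 */
#include <stdint.h>

enum sep_event_kind { SEP_EVT_REPLY, SEP_EVT_PRINTF, SEP_EVT_DAEMON };

static enum sep_event_kind classify_gpr2(uint32_t reg_val2)
{
	if ((reg_val2 >> 30) & 0x1)
		return SEP_EVT_PRINTF;	/* printf request from the SEP */
	if (reg_val2 >> 31)
		return SEP_EVT_DAEMON;	/* daemon request */
	return SEP_EVT_REPLY;		/* normal reply to a host command */
}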
2609
da3f825b
AC
2610/**
2611 * sep_reconfig_shared_area - reconfigure shared area
2612 * @sep: pointer to struct sep_device
2613 *
2614 * Reconfig the shared area between HOST and SEP - needed in case
2615 * the DX_CC_Init function was called before OS loading.
2616 */
2617static int sep_reconfig_shared_area(struct sep_device *sep)
2618{
2619 int ret_val;
2620
be38efe1
MA
2621 /* use to limit waiting for SEP */
2622 unsigned long end_time;
2623
da3f825b 2624 /* Send the new SHARED MESSAGE AREA to the SEP */
dfcfc166 2625 dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
da3f825b
AC
2626 (unsigned long long)sep->shared_bus);
2627
2628 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
2629
2630 /* Poll for SEP response */
2631 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2632
be38efe1
MA
2633 end_time = jiffies + (WAIT_TIME * HZ);
2634
2635 while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
2636 (ret_val != sep->shared_bus))
da3f825b
AC
2637 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2638
2639 /* Check the return value (register) */
2640 if (ret_val != sep->shared_bus) {
2641 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
2642 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
2643 ret_val = -ENOMEM;
2644 } else
2645 ret_val = 0;
2646
2647 dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
2648 return ret_val;
2649}
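/*
 * A minimal sketch of the bounded handshake above: write the new shared-area
 * bus address to the host-to-SEP doorbell register, then poll the SEP reply
 * register until it echoes the same value back or the deadline passes. The
 * register pointers and the wall-clock deadline are userspace stand-ins for
 * sep_write_reg()/sep_read_reg() and the jiffies-based timeout.
 */
#include <stdint.h>
#include <time.h>

static int handshake_shared_area(volatile uint32_t *doorbell,
				 volatile uint32_t *reply,
				 uint32_t shared_bus, unsigned int timeout_s)
{
	time_t deadline = time(NULL) + timeout_s;

	*doorbell = shared_bus;

	while (time(NULL) < deadline) {
		uint32_t val = *reply;

		if (val == shared_bus)
			return 0;	/* SEP acknowledged the new address */
		if (val == 0xffffffff)
			break;		/* register reads all-ones: give up */
	}
	return -1;			/* reconfiguration failed */
}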
2650
2651/* File operation for singleton SEP operations */
2652static const struct file_operations singleton_file_operations = {
2653 .owner = THIS_MODULE,
2654 .unlocked_ioctl = sep_singleton_ioctl,
2655 .poll = sep_poll,
2656 .open = sep_singleton_open,
2657 .release = sep_singleton_release,
2658 .mmap = sep_mmap,
2659};
2660
2661/* File operation for daemon operations */
2662static const struct file_operations daemon_file_operations = {
2663 .owner = THIS_MODULE,
2664 .unlocked_ioctl = sep_request_daemon_ioctl,
2665 .poll = sep_request_daemon_poll,
2666 .open = sep_request_daemon_open,
2667 .release = sep_request_daemon_release,
2668 .mmap = sep_request_daemon_mmap,
2669};
2670
2671/* The files operations structure of the driver */
2672static const struct file_operations sep_file_operations = {
2673 .owner = THIS_MODULE,
2674 .unlocked_ioctl = sep_ioctl,
2675 .poll = sep_poll,
2676 .open = sep_open,
2677 .release = sep_release,
2678 .mmap = sep_mmap,
2679};
2680
2681/**
2682 * sep_register_driver_with_fs - register misc devices
2683 * @sep: pointer to struct sep_device
2684 *
2685 * This function registers the driver with the file system
2686 */
2687static int sep_register_driver_with_fs(struct sep_device *sep)
2688{
2689 int ret_val;
2690
2691 sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
2692 sep->miscdev_sep.name = SEP_DEV_NAME;
2693 sep->miscdev_sep.fops = &sep_file_operations;
2694
2695 sep->miscdev_singleton.minor = MISC_DYNAMIC_MINOR;
2696 sep->miscdev_singleton.name = SEP_DEV_SINGLETON;
2697 sep->miscdev_singleton.fops = &singleton_file_operations;
2698
2699 sep->miscdev_daemon.minor = MISC_DYNAMIC_MINOR;
2700 sep->miscdev_daemon.name = SEP_DEV_DAEMON;
2701 sep->miscdev_daemon.fops = &daemon_file_operations;
2702
2703 ret_val = misc_register(&sep->miscdev_sep);
2704 if (ret_val) {
2705 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
2706 ret_val);
2707 return ret_val;
2708 }
2709
2710 ret_val = misc_register(&sep->miscdev_singleton);
2711 if (ret_val) {
2712 dev_warn(&sep->pdev->dev, "misc reg fails for sing %x\n",
2713 ret_val);
2714 misc_deregister(&sep->miscdev_sep);
2715 return ret_val;
2716 }
2717
0dd12c44
MA
2718 ret_val = misc_register(&sep->miscdev_daemon);
2719 if (ret_val) {
2720 dev_warn(&sep->pdev->dev, "misc reg fails for dmn %x\n",
2721 ret_val);
2722 misc_deregister(&sep->miscdev_sep);
2723 misc_deregister(&sep->miscdev_singleton);
da3f825b 2724
0dd12c44 2725 return ret_val;
da3f825b
AC
2726 }
2727 return ret_val;
2728}
2729
2730
4856ab33 2731/**
d1bb8321 2732 * sep_probe - probe a matching PCI device
4856ab33
MA
2733 * @pdev: pci_device
2734 * @end: pci_device_id
d1bb8321
AC
2735 *
2736 * Attempt to set up and configure a SEP device that has been
2737 * discovered by the PCI layer.
4856ab33
MA
2738 */
2739static int __devinit sep_probe(struct pci_dev *pdev,
2740 const struct pci_device_id *ent)
2741{
2742 int error = 0;
2743 struct sep_device *sep;
2744
4856ab33
MA
2745 if (sep_dev != NULL) {
2746 dev_warn(&pdev->dev, "only one SEP supported.\n");
2747 return -EBUSY;
2748 }
2749
6eb44c53 2750 /* Enable the device */
4856ab33
MA
2751 error = pci_enable_device(pdev);
2752 if (error) {
2753 dev_warn(&pdev->dev, "error enabling pci device\n");
2754 goto end_function;
2755 }
2756
6eb44c53 2757 /* Allocate the sep_device structure for this device */
7c9eb691 2758 sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
4856ab33
MA
2759 if (sep_dev == NULL) {
2760 dev_warn(&pdev->dev,
2761 "can't kmalloc the sep_device structure\n");
843f65c6
MA
2762 error = -ENOMEM;
2763 goto end_function_disable_device;
4856ab33
MA
2764 }
2765
4856ab33 2766 /*
6eb44c53 2767 * We're going to use another variable for actually
4856ab33
MA
2768 * working with the device; this way, if we have
2769 * multiple devices in the future, it would be easier
2770 * to make appropriate changes
2771 */
2772 sep = sep_dev;
2773
843f65c6 2774 sep->pdev = pci_dev_get(pdev);
4856ab33 2775
da3f825b
AC
2776 init_waitqueue_head(&sep->event);
2777 init_waitqueue_head(&sep->event_request_daemon);
2778 spin_lock_init(&sep->snd_rply_lck);
2779 mutex_init(&sep->sep_mutex);
2780 mutex_init(&sep->ioctl_mutex);
2781
dfcfc166 2782 dev_dbg(&sep->pdev->dev, "sep probe: PCI obtained, device being prepared\n");
4856ab33
MA
2783 dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision);
2784
6eb44c53 2785 /* Set up our register area */
4856ab33
MA
2786 sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
2787 if (!sep->reg_physical_addr) {
2788 dev_warn(&sep->pdev->dev, "Error getting register start\n");
843f65c6
MA
2789 error = -ENODEV;
2790 goto end_function_free_sep_dev;
4856ab33
MA
2791 }
2792
2793 sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
2794 if (!sep->reg_physical_end) {
2795 dev_warn(&sep->pdev->dev, "Error getting register end\n");
843f65c6
MA
2796 error = -ENODEV;
2797 goto end_function_free_sep_dev;
4856ab33
MA
2798 }
2799
2800 sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
2801 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
2802 if (!sep->reg_addr) {
2803 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
843f65c6
MA
2804 error = -ENODEV;
2805 goto end_function_free_sep_dev;
4856ab33
MA
2806 }
2807
2808 dev_dbg(&sep->pdev->dev,
2809 "Register area start %llx end %llx virtual %p\n",
2810 (unsigned long long)sep->reg_physical_addr,
2811 (unsigned long long)sep->reg_physical_end,
2812 sep->reg_addr);
2813
6eb44c53 2814 /* Allocate the shared area */
4856ab33
MA
2815 sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2816 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
2817 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
2818 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
2819 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2820
2821 if (sep_map_and_alloc_shared_area(sep)) {
2822 error = -ENOMEM;
6eb44c53 2823 /* Allocation failed */
4856ab33
MA
2824 goto end_function_error;
2825 }
2826
6eb44c53 2827 /* Clear ICR register */
4856ab33
MA
2828 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2829
6eb44c53 2830 /* Set the IMR register - open only GPR 2 */
4856ab33
MA
2831 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2832
08740c97
MA
2833 /* Read send/receive counters from SEP */
2834 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
2835 sep->reply_ct &= 0x3FFFFFFF;
2836 sep->send_ct = sep->reply_ct;
2837
6eb44c53 2838 /* Get the interrupt line */
4856ab33
MA
2839 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
2840 "sep_driver", sep);
2841
da3f825b 2842 if (error)
1254a459 2843 goto end_function_deallocate_sep_shared_area;
4856ab33 2844
836aded1 2845 /* The new chip requires a shared area reconfigure */
da3f825b
AC
2846 if (sep->pdev->revision == 4) { /* Only for new chip */
2847 error = sep_reconfig_shared_area(sep);
2848 if (error)
2849 goto end_function_free_irq;
2850 }
2851 /* Finally magic up the device nodes */
2852 /* Register driver with the fs */
2853 error = sep_register_driver_with_fs(sep);
2854 if (error == 0)
2855 /* Success */
2856 return 0;
e957b063 2857
da3f825b
AC
2858end_function_free_irq:
2859 free_irq(pdev->irq, sep);
2860
4856ab33 2861end_function_deallocate_sep_shared_area:
6eb44c53 2862 /* De-allocate shared area */
4856ab33
MA
2863 sep_unmap_and_free_shared_area(sep);
2864
2865end_function_error:
2866 iounmap(sep->reg_addr);
843f65c6
MA
2867
2868end_function_free_sep_dev:
2869 pci_dev_put(sep_dev->pdev);
4856ab33
MA
2870 kfree(sep_dev);
2871 sep_dev = NULL;
2872
843f65c6
MA
2873end_function_disable_device:
2874 pci_disable_device(pdev);
2875
4856ab33
MA
2876end_function:
2877 return error;
2878}
2879
da3f825b
AC
2880static void sep_remove(struct pci_dev *pdev)
2881{
2882 struct sep_device *sep = sep_dev;
2883
2884 /* Unregister from fs */
2885 misc_deregister(&sep->miscdev_sep);
2886 misc_deregister(&sep->miscdev_singleton);
2887 misc_deregister(&sep->miscdev_daemon);
2888
2889 /* Free the irq */
2890 free_irq(sep->pdev->irq, sep);
2891
2892 /* Free the shared area */
2893 sep_unmap_and_free_shared_area(sep_dev);
2894 iounmap((void *) sep_dev->reg_addr);
2895}
2896
4856ab33 2897static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
4856ab33
MA
2898 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)},
2899 {0}
2900};
2901
2902MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2903
6eb44c53 2904/* Field for registering driver to PCI device */
4856ab33
MA
2905static struct pci_driver sep_pci_driver = {
2906 .name = "sep_sec_driver",
2907 .id_table = sep_pci_id_tbl,
da3f825b
AC
2908 .probe = sep_probe,
2909 .remove = sep_remove
4856ab33
MA
2910};
2911
4856ab33
MA
2912
2913/**
d1bb8321
AC
2914 * sep_init - init function
2915 *
2916 * Module load time. Register the PCI device driver.
4856ab33
MA
2917 */
2918static int __init sep_init(void)
2919{
da3f825b 2920 return pci_register_driver(&sep_pci_driver);
4856ab33
MA
2921}
2922
2923
2924/**
e957b063 2925 * sep_exit - called to unload driver
d1bb8321
AC
2926 *
2927 * Drop the misc devices then remove and unmap the various resources
2928 * that are not released by the driver remove method.
4856ab33
MA
2929 */
2930static void __exit sep_exit(void)
2931{
4856ab33 2932 pci_unregister_driver(&sep_pci_driver);
4856ab33
MA
2933}
2934
2935
2936module_init(sep_init);
2937module_exit(sep_exit);
2938
2939MODULE_LICENSE("GPL");