3 * sep_main.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009-2011 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Mark Allyn mark.a.allyn@intel.com
24 * Jayant Mangalampalli jayant.mangalampalli@intel.com
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
30 * 2011.01.21 Move to sep_main.c to allow for sep_crypto.c
31 * 2011.02.22 Enable kernel crypto operation
33 * Please note that this driver is based on information in the Discretix
34 * CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
35 * Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
36 * Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
37 * Overview and Integration Guide.
40 /* #define SEP_PERF_DEBUG */
42 #include <linux/init.h>
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/miscdevice.h>
47 #include <linux/cdev.h>
48 #include <linux/kdev_t.h>
49 #include <linux/mutex.h>
50 #include <linux/sched.h>
52 #include <linux/poll.h>
53 #include <linux/wait.h>
54 #include <linux/pci.h>
55 #include <linux/pm_runtime.h>
56 #include <linux/slab.h>
57 #include <linux/ioctl.h>
58 #include <asm/current.h>
59 #include <linux/ioport.h>
61 #include <linux/interrupt.h>
62 #include <linux/pagemap.h>
63 #include <asm/cacheflush.h>
64 #include <linux/sched.h>
65 #include <linux/delay.h>
66 #include <linux/jiffies.h>
67 #include <linux/async.h>
68 #include <linux/crypto.h>
69 #include <crypto/internal/hash.h>
70 #include <crypto/scatterwalk.h>
71 #include <crypto/sha.h>
72 #include <crypto/md5.h>
73 #include <crypto/aes.h>
74 #include <crypto/des.h>
75 #include <crypto/hash.h>
77 #include "sep_driver_hw_defs.h"
78 #include "sep_driver_config.h"
79 #include "sep_driver_api.h"
81 #include "sep_crypto.h"
83 #define CREATE_TRACE_POINTS
84 #include "sep_trace_events.h"
/*
 * Let's not spend cycles iterating over message
 * area contents if debugging not enabled
 *
 * Note: the garbled source carried both definitions unconditionally;
 * they must be selected by the DEBUG preprocessor switch.
 */
#ifdef DEBUG
#define sep_dump_message(sep)	_sep_dump_message(sep)
#else
#define sep_dump_message(sep)
#endif
97 * Currently, there is only one SEP device per platform;
98 * In event platforms in the future have more than one SEP
99 * device, this will be a linked list
/*
 * The single platform-wide SEP device instance; would become a
 * linked list if future platforms carry more than one SEP.
 */
struct sep_device *sep_dev;
105 * sep_queue_status_remove - Removes transaction from status queue
107 * @sep_queue_info: pointer to status queue
109 * This function will removes information about transaction from the queue.
111 void sep_queue_status_remove(struct sep_device
*sep
,
112 struct sep_queue_info
**queue_elem
)
114 unsigned long lck_flags
;
116 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_queue_status_remove\n",
119 if (!queue_elem
|| !(*queue_elem
)) {
120 dev_dbg(&sep
->pdev
->dev
, "PID%d %s null\n",
121 current
->pid
, __func__
);
125 spin_lock_irqsave(&sep
->sep_queue_lock
, lck_flags
);
126 list_del(&(*queue_elem
)->list
);
127 sep
->sep_queue_num
--;
128 spin_unlock_irqrestore(&sep
->sep_queue_lock
, lck_flags
);
133 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_queue_status_remove return\n",
139 * sep_queue_status_add - Adds transaction to status queue
141 * @opcode: transaction opcode
142 * @size: input data size
143 * @pid: pid of current process
144 * @name: current process name
145 * @name_len: length of name (current process)
147 * This function adds information about about transaction started to the status
150 struct sep_queue_info
*sep_queue_status_add(
151 struct sep_device
*sep
,
155 u8
*name
, size_t name_len
)
157 unsigned long lck_flags
;
158 struct sep_queue_info
*my_elem
= NULL
;
160 my_elem
= kzalloc(sizeof(struct sep_queue_info
), GFP_KERNEL
);
165 dev_dbg(&sep
->pdev
->dev
, "[PID%d] kzalloc ok\n", current
->pid
);
167 my_elem
->data
.opcode
= opcode
;
168 my_elem
->data
.size
= size
;
169 my_elem
->data
.pid
= pid
;
171 if (name_len
> TASK_COMM_LEN
)
172 name_len
= TASK_COMM_LEN
;
174 memcpy(&my_elem
->data
.name
, name
, name_len
);
176 spin_lock_irqsave(&sep
->sep_queue_lock
, lck_flags
);
178 list_add_tail(&my_elem
->list
, &sep
->sep_queue_status
);
179 sep
->sep_queue_num
++;
181 spin_unlock_irqrestore(&sep
->sep_queue_lock
, lck_flags
);
187 * sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
189 * @dmatables_region: Destination pointer for the buffer
190 * @dma_ctx: DMA context for the transaction
191 * @table_count: Number of MLLI/DMA tables to create
192 * The buffer created will not work as-is for DMA operations,
193 * it needs to be copied over to the appropriate place in the
196 static int sep_allocate_dmatables_region(struct sep_device
*sep
,
197 void **dmatables_region
,
198 struct sep_dma_context
*dma_ctx
,
199 const u32 table_count
)
201 const size_t new_len
=
202 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
- 1;
204 void *tmp_region
= NULL
;
206 dev_dbg(&sep
->pdev
->dev
, "[PID%d] dma_ctx = 0x%p\n",
207 current
->pid
, dma_ctx
);
208 dev_dbg(&sep
->pdev
->dev
, "[PID%d] dmatables_region = 0x%p\n",
209 current
->pid
, dmatables_region
);
211 if (!dma_ctx
|| !dmatables_region
) {
212 dev_warn(&sep
->pdev
->dev
,
213 "[PID%d] dma context/region uninitialized\n",
218 dev_dbg(&sep
->pdev
->dev
, "[PID%d] newlen = 0x%08zX\n",
219 current
->pid
, new_len
);
220 dev_dbg(&sep
->pdev
->dev
, "[PID%d] oldlen = 0x%08X\n", current
->pid
,
221 dma_ctx
->dmatables_len
);
222 tmp_region
= kzalloc(new_len
+ dma_ctx
->dmatables_len
, GFP_KERNEL
);
224 dev_warn(&sep
->pdev
->dev
,
225 "[PID%d] no mem for dma tables region\n",
230 /* Were there any previous tables that need to be preserved ? */
231 if (*dmatables_region
) {
232 memcpy(tmp_region
, *dmatables_region
, dma_ctx
->dmatables_len
);
233 kfree(*dmatables_region
);
234 *dmatables_region
= NULL
;
237 *dmatables_region
= tmp_region
;
239 dma_ctx
->dmatables_len
+= new_len
;
245 * sep_wait_transaction - Used for synchronizing transactions
248 int sep_wait_transaction(struct sep_device
*sep
)
253 if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT
,
254 &sep
->in_use_flags
)) {
255 dev_dbg(&sep
->pdev
->dev
,
256 "[PID%d] no transactions, returning\n",
258 goto end_function_setpid
;
262 * Looping needed even for exclusive waitq entries
263 * due to process wakeup latencies, previous process
264 * might have already created another transaction.
268 * Exclusive waitq entry, so that only one process is
269 * woken up from the queue at a time.
271 prepare_to_wait_exclusive(&sep
->event_transactions
,
274 if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT
,
275 &sep
->in_use_flags
)) {
276 dev_dbg(&sep
->pdev
->dev
,
277 "[PID%d] no transactions, breaking\n",
281 dev_dbg(&sep
->pdev
->dev
,
282 "[PID%d] transactions ongoing, sleeping\n",
285 dev_dbg(&sep
->pdev
->dev
, "[PID%d] woken up\n", current
->pid
);
287 if (signal_pending(current
)) {
288 dev_dbg(&sep
->pdev
->dev
, "[PID%d] received signal\n",
296 * The pid_doing_transaction indicates that this process
297 * now owns the facilities to performa a transaction with
298 * the SEP. While this process is performing a transaction,
299 * no other process who has the SEP device open can perform
300 * any transactions. This method allows more than one process
301 * to have the device open at any given time, which provides
302 * finer granularity for device utilization by multiple
305 /* Only one process is able to progress here at a time */
306 sep
->pid_doing_transaction
= current
->pid
;
309 finish_wait(&sep
->event_transactions
, &wait
);
315 * sep_check_transaction_owner - Checks if current process owns transaction
318 static inline int sep_check_transaction_owner(struct sep_device
*sep
)
320 dev_dbg(&sep
->pdev
->dev
, "[PID%d] transaction pid = %d\n",
322 sep
->pid_doing_transaction
);
324 if ((sep
->pid_doing_transaction
== 0) ||
325 (current
->pid
!= sep
->pid_doing_transaction
)) {
329 /* We own the transaction */
336 * sep_dump_message - dump the message that is pending
338 * This will only print dump if DEBUG is set; it does
339 * follow kernel debug print enabling
341 static void _sep_dump_message(struct sep_device
*sep
)
345 u32
*p
= sep
->shared_addr
;
347 for (count
= 0; count
< 10 * 4; count
+= 4)
348 dev_dbg(&sep
->pdev
->dev
,
349 "[PID%d] Word %d of the message is %x\n",
350 current
->pid
, count
/4, *p
++);
356 * sep_map_and_alloc_shared_area -allocate shared block
357 * @sep: security processor
358 * @size: size of shared area
360 static int sep_map_and_alloc_shared_area(struct sep_device
*sep
)
362 sep
->shared_addr
= dma_alloc_coherent(&sep
->pdev
->dev
,
364 &sep
->shared_bus
, GFP_KERNEL
);
366 if (!sep
->shared_addr
) {
367 dev_dbg(&sep
->pdev
->dev
,
368 "[PID%d] shared memory dma_alloc_coherent failed\n",
372 dev_dbg(&sep
->pdev
->dev
,
373 "[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
375 sep
->shared_size
, sep
->shared_addr
,
376 (unsigned long long)sep
->shared_bus
);
381 * sep_unmap_and_free_shared_area - free shared block
382 * @sep: security processor
384 static void sep_unmap_and_free_shared_area(struct sep_device
*sep
)
386 dma_free_coherent(&sep
->pdev
->dev
, sep
->shared_size
,
387 sep
->shared_addr
, sep
->shared_bus
);
393 * sep_shared_bus_to_virt - convert bus/virt addresses
394 * @sep: pointer to struct sep_device
395 * @bus_address: address to convert
397 * Returns virtual address inside the shared area according
398 * to the bus address.
400 static void *sep_shared_bus_to_virt(struct sep_device
*sep
,
401 dma_addr_t bus_address
)
403 return sep
->shared_addr
+ (bus_address
- sep
->shared_bus
);
409 * sep_open - device open method
410 * @inode: inode of SEP device
411 * @filp: file handle to SEP device
413 * Open method for the SEP device. Called when userspace opens
414 * the SEP device node.
416 * Returns zero on success otherwise an error code.
418 static int sep_open(struct inode
*inode
, struct file
*filp
)
420 struct sep_device
*sep
;
421 struct sep_private_data
*priv
;
423 dev_dbg(&sep_dev
->pdev
->dev
, "[PID%d] open\n", current
->pid
);
425 if (filp
->f_flags
& O_NONBLOCK
)
429 * Get the SEP device structure and use it for the
430 * private_data field in filp for other methods
433 priv
= kzalloc(sizeof(*priv
), GFP_KERNEL
);
439 filp
->private_data
= priv
;
441 dev_dbg(&sep_dev
->pdev
->dev
, "[PID%d] priv is 0x%p\n",
444 /* Anyone can open; locking takes place at transaction level */
449 * sep_free_dma_table_data_handler - free DMA table
450 * @sep: pointere to struct sep_device
451 * @dma_ctx: dma context
453 * Handles the request to free DMA table for synchronic actions
455 int sep_free_dma_table_data_handler(struct sep_device
*sep
,
456 struct sep_dma_context
**dma_ctx
)
460 /* Pointer to the current dma_resource struct */
461 struct sep_dma_resource
*dma
;
463 dev_dbg(&sep
->pdev
->dev
,
464 "[PID%d] sep_free_dma_table_data_handler\n",
467 if (!dma_ctx
|| !(*dma_ctx
)) {
468 /* No context or context already freed */
469 dev_dbg(&sep
->pdev
->dev
,
470 "[PID%d] no DMA context or context already freed\n",
476 dev_dbg(&sep
->pdev
->dev
, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
478 (*dma_ctx
)->nr_dcb_creat
);
480 for (dcb_counter
= 0;
481 dcb_counter
< (*dma_ctx
)->nr_dcb_creat
; dcb_counter
++) {
482 dma
= &(*dma_ctx
)->dma_res_arr
[dcb_counter
];
484 /* Unmap and free input map array */
485 if (dma
->in_map_array
) {
486 for (count
= 0; count
< dma
->in_num_pages
; count
++) {
487 dma_unmap_page(&sep
->pdev
->dev
,
488 dma
->in_map_array
[count
].dma_addr
,
489 dma
->in_map_array
[count
].size
,
492 kfree(dma
->in_map_array
);
496 * Output is handled different. If
497 * this was a secure dma into restricted memory,
498 * then we skip this step altogether as restricted
499 * memory is not available to the o/s at all.
501 if (((*dma_ctx
)->secure_dma
== false) &&
502 (dma
->out_map_array
)) {
504 for (count
= 0; count
< dma
->out_num_pages
; count
++) {
505 dma_unmap_page(&sep
->pdev
->dev
,
506 dma
->out_map_array
[count
].dma_addr
,
507 dma
->out_map_array
[count
].size
,
510 kfree(dma
->out_map_array
);
513 /* Free page cache for output */
514 if (dma
->in_page_array
) {
515 for (count
= 0; count
< dma
->in_num_pages
; count
++) {
516 flush_dcache_page(dma
->in_page_array
[count
]);
517 page_cache_release(dma
->in_page_array
[count
]);
519 kfree(dma
->in_page_array
);
522 /* Again, we do this only for non secure dma */
523 if (((*dma_ctx
)->secure_dma
== false) &&
524 (dma
->out_page_array
)) {
526 for (count
= 0; count
< dma
->out_num_pages
; count
++) {
527 if (!PageReserved(dma
->out_page_array
[count
]))
530 out_page_array
[count
]);
532 flush_dcache_page(dma
->out_page_array
[count
]);
533 page_cache_release(dma
->out_page_array
[count
]);
535 kfree(dma
->out_page_array
);
539 * Note that here we use in_map_num_entries because we
540 * don't have a page array; the page array is generated
541 * only in the lock_user_pages, which is not called
542 * for kernel crypto, which is what the sg (scatter gather
543 * is used for exclusively
546 dma_unmap_sg(&sep
->pdev
->dev
, dma
->src_sg
,
547 dma
->in_map_num_entries
, DMA_TO_DEVICE
);
552 dma_unmap_sg(&sep
->pdev
->dev
, dma
->dst_sg
,
553 dma
->in_map_num_entries
, DMA_FROM_DEVICE
);
557 /* Reset all the values */
558 dma
->in_page_array
= NULL
;
559 dma
->out_page_array
= NULL
;
560 dma
->in_num_pages
= 0;
561 dma
->out_num_pages
= 0;
562 dma
->in_map_array
= NULL
;
563 dma
->out_map_array
= NULL
;
564 dma
->in_map_num_entries
= 0;
565 dma
->out_map_num_entries
= 0;
568 (*dma_ctx
)->nr_dcb_creat
= 0;
569 (*dma_ctx
)->num_lli_tables_created
= 0;
574 dev_dbg(&sep
->pdev
->dev
,
575 "[PID%d] sep_free_dma_table_data_handler end\n",
582 * sep_end_transaction_handler - end transaction
583 * @sep: pointer to struct sep_device
584 * @dma_ctx: DMA context
585 * @call_status: Call status
587 * This API handles the end transaction request.
589 static int sep_end_transaction_handler(struct sep_device
*sep
,
590 struct sep_dma_context
**dma_ctx
,
591 struct sep_call_status
*call_status
,
592 struct sep_queue_info
**my_queue_elem
)
594 dev_dbg(&sep
->pdev
->dev
, "[PID%d] ending transaction\n", current
->pid
);
597 * Extraneous transaction clearing would mess up PM
598 * device usage counters and SEP would get suspended
599 * just before we send a command to SEP in the next
602 if (sep_check_transaction_owner(sep
)) {
603 dev_dbg(&sep
->pdev
->dev
, "[PID%d] not transaction owner\n",
608 /* Update queue status */
609 sep_queue_status_remove(sep
, my_queue_elem
);
611 /* Check that all the DMA resources were freed */
613 sep_free_dma_table_data_handler(sep
, dma_ctx
);
615 /* Reset call status for next transaction */
617 call_status
->status
= 0;
619 /* Clear the message area to avoid next transaction reading
620 * sensitive results from previous transaction */
621 memset(sep
->shared_addr
, 0,
622 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES
);
624 /* start suspend delay */
625 #ifdef SEP_ENABLE_RUNTIME_PM
628 pm_runtime_mark_last_busy(&sep
->pdev
->dev
);
629 pm_runtime_put_autosuspend(&sep
->pdev
->dev
);
633 clear_bit(SEP_WORKING_LOCK_BIT
, &sep
->in_use_flags
);
634 sep
->pid_doing_transaction
= 0;
636 /* Now it's safe for next process to proceed */
637 dev_dbg(&sep
->pdev
->dev
, "[PID%d] waking up next transaction\n",
639 clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT
, &sep
->in_use_flags
);
640 wake_up(&sep
->event_transactions
);
647 * sep_release - close a SEP device
648 * @inode: inode of SEP device
649 * @filp: file handle being closed
651 * Called on the final close of a SEP device.
653 static int sep_release(struct inode
*inode
, struct file
*filp
)
655 struct sep_private_data
* const private_data
= filp
->private_data
;
656 struct sep_call_status
*call_status
= &private_data
->call_status
;
657 struct sep_device
*sep
= private_data
->device
;
658 struct sep_dma_context
**dma_ctx
= &private_data
->dma_ctx
;
659 struct sep_queue_info
**my_queue_elem
= &private_data
->my_queue_elem
;
661 dev_dbg(&sep
->pdev
->dev
, "[PID%d] release\n", current
->pid
);
663 sep_end_transaction_handler(sep
, dma_ctx
, call_status
,
666 kfree(filp
->private_data
);
672 * sep_mmap - maps the shared area to user space
673 * @filp: pointer to struct file
674 * @vma: pointer to vm_area_struct
676 * Called on an mmap of our space via the normal SEP device
678 static int sep_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
680 struct sep_private_data
* const private_data
= filp
->private_data
;
681 struct sep_call_status
*call_status
= &private_data
->call_status
;
682 struct sep_device
*sep
= private_data
->device
;
683 struct sep_queue_info
**my_queue_elem
= &private_data
->my_queue_elem
;
685 unsigned long error
= 0;
687 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_mmap\n", current
->pid
);
689 /* Set the transaction busy (own the device) */
691 * Problem for multithreaded applications is that here we're
692 * possibly going to sleep while holding a write lock on
693 * current->mm->mmap_sem, which will cause deadlock for ongoing
694 * transaction trying to create DMA tables
696 error
= sep_wait_transaction(sep
);
698 /* Interrupted by signal, don't clear transaction */
701 /* Clear the message area to avoid next transaction reading
702 * sensitive results from previous transaction */
703 memset(sep
->shared_addr
, 0,
704 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES
);
707 * Check that the size of the mapped range is as the size of the message
710 if ((vma
->vm_end
- vma
->vm_start
) > SEP_DRIVER_MMMAP_AREA_SIZE
) {
712 goto end_function_with_error
;
715 dev_dbg(&sep
->pdev
->dev
, "[PID%d] shared_addr is %p\n",
716 current
->pid
, sep
->shared_addr
);
718 /* Get bus address */
719 bus_addr
= sep
->shared_bus
;
721 if (remap_pfn_range(vma
, vma
->vm_start
, bus_addr
>> PAGE_SHIFT
,
722 vma
->vm_end
- vma
->vm_start
, vma
->vm_page_prot
)) {
723 dev_dbg(&sep
->pdev
->dev
, "[PID%d] remap_page_range failed\n",
726 goto end_function_with_error
;
729 /* Update call status */
730 set_bit(SEP_LEGACY_MMAP_DONE_OFFSET
, &call_status
->status
);
734 end_function_with_error
:
735 /* Clear our transaction */
736 sep_end_transaction_handler(sep
, NULL
, call_status
,
744 * sep_poll - poll handler
745 * @filp: pointer to struct file
746 * @wait: pointer to poll_table
748 * Called by the OS when the kernel is asked to do a poll on
751 static unsigned int sep_poll(struct file
*filp
, poll_table
*wait
)
753 struct sep_private_data
* const private_data
= filp
->private_data
;
754 struct sep_call_status
*call_status
= &private_data
->call_status
;
755 struct sep_device
*sep
= private_data
->device
;
759 unsigned long lock_irq_flag
;
761 /* Am I the process that owns the transaction? */
762 if (sep_check_transaction_owner(sep
)) {
763 dev_dbg(&sep
->pdev
->dev
, "[PID%d] poll pid not owner\n",
769 /* Check if send command or send_reply were activated previously */
770 if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET
,
771 &call_status
->status
)) {
772 dev_warn(&sep
->pdev
->dev
, "[PID%d] sendmsg not called\n",
779 /* Add the event to the polling wait table */
780 dev_dbg(&sep
->pdev
->dev
, "[PID%d] poll: calling wait sep_event\n",
783 poll_wait(filp
, &sep
->event_interrupt
, wait
);
785 dev_dbg(&sep
->pdev
->dev
,
786 "[PID%d] poll: send_ct is %lx reply ct is %lx\n",
787 current
->pid
, sep
->send_ct
, sep
->reply_ct
);
789 /* Check if error occured during poll */
790 retval2
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR3_REG_ADDR
);
791 if ((retval2
!= 0x0) && (retval2
!= 0x8)) {
792 dev_dbg(&sep
->pdev
->dev
, "[PID%d] poll; poll error %x\n",
793 current
->pid
, retval2
);
798 spin_lock_irqsave(&sep
->snd_rply_lck
, lock_irq_flag
);
800 if (sep
->send_ct
== sep
->reply_ct
) {
801 spin_unlock_irqrestore(&sep
->snd_rply_lck
, lock_irq_flag
);
802 retval
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR2_REG_ADDR
);
803 dev_dbg(&sep
->pdev
->dev
,
804 "[PID%d] poll: data ready check (GPR2) %x\n",
805 current
->pid
, retval
);
807 /* Check if printf request */
808 if ((retval
>> 30) & 0x1) {
809 dev_dbg(&sep
->pdev
->dev
,
810 "[PID%d] poll: SEP printf request\n",
815 /* Check if the this is SEP reply or request */
817 dev_dbg(&sep
->pdev
->dev
,
818 "[PID%d] poll: SEP request\n",
821 dev_dbg(&sep
->pdev
->dev
,
822 "[PID%d] poll: normal return\n",
824 sep_dump_message(sep
);
825 dev_dbg(&sep
->pdev
->dev
,
826 "[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
828 mask
|= POLLIN
| POLLRDNORM
;
830 set_bit(SEP_LEGACY_POLL_DONE_OFFSET
, &call_status
->status
);
832 spin_unlock_irqrestore(&sep
->snd_rply_lck
, lock_irq_flag
);
833 dev_dbg(&sep
->pdev
->dev
,
834 "[PID%d] poll; no reply; returning mask of 0\n",
844 * sep_time_address - address in SEP memory of time
845 * @sep: SEP device we want the address from
847 * Return the address of the two dwords in memory used for time
850 static u32
*sep_time_address(struct sep_device
*sep
)
852 return sep
->shared_addr
+
853 SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES
;
857 * sep_set_time - set the SEP time
858 * @sep: the SEP we are setting the time for
860 * Calculates time and sets it at the predefined address.
861 * Called with the SEP mutex held.
863 static unsigned long sep_set_time(struct sep_device
*sep
)
866 u32
*time_addr
; /* Address of time as seen by the kernel */
869 do_gettimeofday(&time
);
871 /* Set value in the SYSTEM MEMORY offset */
872 time_addr
= sep_time_address(sep
);
874 time_addr
[0] = SEP_TIME_VAL_TOKEN
;
875 time_addr
[1] = time
.tv_sec
;
877 dev_dbg(&sep
->pdev
->dev
, "[PID%d] time.tv_sec is %lu\n",
878 current
->pid
, time
.tv_sec
);
879 dev_dbg(&sep
->pdev
->dev
, "[PID%d] time_addr is %p\n",
880 current
->pid
, time_addr
);
881 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep->shared_addr is %p\n",
882 current
->pid
, sep
->shared_addr
);
888 * sep_send_command_handler - kick off a command
889 * @sep: SEP being signalled
891 * This function raises interrupt to SEP that signals that is has a new
892 * command from the host
894 * Note that this function does fall under the ioctl lock
896 int sep_send_command_handler(struct sep_device
*sep
)
898 unsigned long lock_irq_flag
;
902 /* Basic sanity check; set msg pool to start of shared area */
903 msg_pool
= (u32
*)sep
->shared_addr
;
906 /* Look for start msg token */
907 if (*msg_pool
!= SEP_START_MSG_TOKEN
) {
908 dev_warn(&sep
->pdev
->dev
, "start message token not present\n");
913 /* Do we have a reasonable size? */
915 if ((*msg_pool
< 2) ||
916 (*msg_pool
> SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES
)) {
918 dev_warn(&sep
->pdev
->dev
, "invalid message size\n");
923 /* Does the command look reasonable? */
926 dev_warn(&sep
->pdev
->dev
, "invalid message opcode\n");
931 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
932 dev_dbg(&sep
->pdev
->dev
, "[PID%d] before pm sync status 0x%X\n",
934 sep
->pdev
->dev
.power
.runtime_status
);
935 sep
->in_use
= 1; /* device is about to be used */
936 pm_runtime_get_sync(&sep
->pdev
->dev
);
939 if (test_and_set_bit(SEP_WORKING_LOCK_BIT
, &sep
->in_use_flags
)) {
943 sep
->in_use
= 1; /* device is about to be used */
946 sep_dump_message(sep
);
949 spin_lock_irqsave(&sep
->snd_rply_lck
, lock_irq_flag
);
951 spin_unlock_irqrestore(&sep
->snd_rply_lck
, lock_irq_flag
);
953 dev_dbg(&sep
->pdev
->dev
,
954 "[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
955 current
->pid
, sep
->send_ct
, sep
->reply_ct
);
957 /* Send interrupt to SEP */
958 sep_write_reg(sep
, HW_HOST_HOST_SEP_GPR0_REG_ADDR
, 0x2);
966 * @sep: pointer to struct sep_device
967 * @sg: pointer to struct scatterlist
969 * @dma_maps: pointer to place a pointer to array of dma maps
970 * This is filled in; anything previous there will be lost
971 * The structure for dma maps is sep_dma_map
972 * @returns number of dma maps on success; negative on error
974 * This creates the dma table from the scatterlist
975 * It is used only for kernel crypto as it works with scatterlists
976 * representation of data buffers
979 static int sep_crypto_dma(
980 struct sep_device
*sep
,
981 struct scatterlist
*sg
,
982 struct sep_dma_map
**dma_maps
,
983 enum dma_data_direction direction
)
985 struct scatterlist
*temp_sg
;
989 struct sep_dma_map
*sep_dma
;
995 /* Count the segments */
1000 temp_sg
= scatterwalk_sg_next(temp_sg
);
1002 dev_dbg(&sep
->pdev
->dev
,
1003 "There are (hex) %x segments in sg\n", count_segment
);
1005 /* DMA map segments */
1006 count_mapped
= dma_map_sg(&sep
->pdev
->dev
, sg
,
1007 count_segment
, direction
);
1009 dev_dbg(&sep
->pdev
->dev
,
1010 "There are (hex) %x maps in sg\n", count_mapped
);
1012 if (count_mapped
== 0) {
1013 dev_dbg(&sep
->pdev
->dev
, "Cannot dma_map_sg\n");
1017 sep_dma
= kmalloc(sizeof(struct sep_dma_map
) *
1018 count_mapped
, GFP_ATOMIC
);
1020 if (sep_dma
== NULL
) {
1021 dev_dbg(&sep
->pdev
->dev
, "Cannot allocate dma_maps\n");
1025 for_each_sg(sg
, temp_sg
, count_mapped
, ct1
) {
1026 sep_dma
[ct1
].dma_addr
= sg_dma_address(temp_sg
);
1027 sep_dma
[ct1
].size
= sg_dma_len(temp_sg
);
1028 dev_dbg(&sep
->pdev
->dev
, "(all hex) map %x dma %lx len %lx\n",
1029 ct1
, (unsigned long)sep_dma
[ct1
].dma_addr
,
1030 (unsigned long)sep_dma
[ct1
].size
);
1033 *dma_maps
= sep_dma
;
1034 return count_mapped
;
1040 * @sep: pointer to struct sep_device
1041 * @sg: pointer to struct scatterlist
1042 * @data_size: total data size
1044 * @dma_maps: pointer to place a pointer to array of dma maps
1045 * This is filled in; anything previous there will be lost
1046 * The structure for dma maps is sep_dma_map
1047 * @lli_maps: pointer to place a pointer to array of lli maps
1048 * This is filled in; anything previous there will be lost
1049 * The structure for dma maps is sep_dma_map
1050 * @returns number of dma maps on success; negative on error
1052 * This creates the LLI table from the scatterlist
1053 * It is only used for kernel crypto as it works exclusively
1054 * with scatterlists (struct scatterlist) representation of
1057 static int sep_crypto_lli(
1058 struct sep_device
*sep
,
1059 struct scatterlist
*sg
,
1060 struct sep_dma_map
**maps
,
1061 struct sep_lli_entry
**llis
,
1063 enum dma_data_direction direction
)
1067 struct sep_lli_entry
*sep_lli
;
1068 struct sep_dma_map
*sep_map
;
1072 nbr_ents
= sep_crypto_dma(sep
, sg
, maps
, direction
);
1073 if (nbr_ents
<= 0) {
1074 dev_dbg(&sep
->pdev
->dev
, "crypto_dma failed %x\n",
1081 sep_lli
= kmalloc(sizeof(struct sep_lli_entry
) * nbr_ents
, GFP_ATOMIC
);
1083 if (sep_lli
== NULL
) {
1084 dev_dbg(&sep
->pdev
->dev
, "Cannot allocate lli_maps\n");
1091 for (ct1
= 0; ct1
< nbr_ents
; ct1
+= 1) {
1092 sep_lli
[ct1
].bus_address
= (u32
)sep_map
[ct1
].dma_addr
;
1094 /* Maximum for page is total data size */
1095 if (sep_map
[ct1
].size
> data_size
)
1096 sep_map
[ct1
].size
= data_size
;
1098 sep_lli
[ct1
].block_size
= (u32
)sep_map
[ct1
].size
;
1106 * sep_lock_kernel_pages - map kernel pages for DMA
1107 * @sep: pointer to struct sep_device
1108 * @kernel_virt_addr: address of data buffer in kernel
1109 * @data_size: size of data
1110 * @lli_array_ptr: lli array
1111 * @in_out_flag: input into device or output from device
1113 * This function locks all the physical pages of the kernel virtual buffer
1114 * and construct a basic lli array, where each entry holds the physical
1115 * page address and the size that application data holds in this page
1116 * This function is used only during kernel crypto mod calls from within
1117 * the kernel (when ioctl is not used)
1119 * This is used only for kernel crypto. Kernel pages
1120 * are handled differently as they are done via
1121 * scatter gather lists (struct scatterlist)
1123 static int sep_lock_kernel_pages(struct sep_device
*sep
,
1124 unsigned long kernel_virt_addr
,
1126 struct sep_lli_entry
**lli_array_ptr
,
1128 struct sep_dma_context
*dma_ctx
)
1132 struct scatterlist
*sg
;
1135 struct sep_lli_entry
*lli_array
;
1137 struct sep_dma_map
*map_array
;
1139 enum dma_data_direction direction
;
1144 if (in_out_flag
== SEP_DRIVER_IN_FLAG
) {
1145 direction
= DMA_TO_DEVICE
;
1146 sg
= dma_ctx
->src_sg
;
1148 direction
= DMA_FROM_DEVICE
;
1149 sg
= dma_ctx
->dst_sg
;
1152 num_pages
= sep_crypto_lli(sep
, sg
, &map_array
, &lli_array
,
1153 data_size
, direction
);
1155 if (num_pages
<= 0) {
1156 dev_dbg(&sep
->pdev
->dev
, "sep_crypto_lli returned error %x\n",
1161 /* Put mapped kernel sg into kernel resource array */
1163 /* Set output params acording to the in_out flag */
1164 if (in_out_flag
== SEP_DRIVER_IN_FLAG
) {
1165 *lli_array_ptr
= lli_array
;
1166 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_num_pages
=
1168 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
=
1170 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_array
=
1172 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_num_entries
=
1174 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].src_sg
=
1177 *lli_array_ptr
= lli_array
;
1178 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_num_pages
=
1180 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_page_array
=
1182 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_map_array
=
1184 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].
1185 out_map_num_entries
= num_pages
;
1186 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].dst_sg
=
1194 * sep_lock_user_pages - lock and map user pages for DMA
1195 * @sep: pointer to struct sep_device
1196 * @app_virt_addr: user memory data buffer
1197 * @data_size: size of data buffer
1198 * @lli_array_ptr: lli array
1199 * @in_out_flag: input or output to device
1201 * This function locks all the physical pages of the application
1202 * virtual buffer and construct a basic lli array, where each entry
1203 * holds the physical page address and the size that application
1204 * data holds in this physical pages
1206 static int sep_lock_user_pages(struct sep_device
*sep
,
1209 struct sep_lli_entry
**lli_array_ptr
,
1211 struct sep_dma_context
*dma_ctx
)
1217 /* The the page of the end address of the user space buffer */
1219 /* The page of the start address of the user space buffer */
1221 /* The range in pages */
1223 /* Array of pointers to page */
1224 struct page
**page_array
;
1226 struct sep_lli_entry
*lli_array
;
1228 struct sep_dma_map
*map_array
;
1230 /* Set start and end pages and num pages */
1231 end_page
= (app_virt_addr
+ data_size
- 1) >> PAGE_SHIFT
;
1232 start_page
= app_virt_addr
>> PAGE_SHIFT
;
1233 num_pages
= end_page
- start_page
+ 1;
1235 dev_dbg(&sep
->pdev
->dev
,
1236 "[PID%d] lock user pages app_virt_addr is %x\n",
1237 current
->pid
, app_virt_addr
);
1239 dev_dbg(&sep
->pdev
->dev
, "[PID%d] data_size is (hex) %x\n",
1240 current
->pid
, data_size
);
1241 dev_dbg(&sep
->pdev
->dev
, "[PID%d] start_page is (hex) %x\n",
1242 current
->pid
, start_page
);
1243 dev_dbg(&sep
->pdev
->dev
, "[PID%d] end_page is (hex) %x\n",
1244 current
->pid
, end_page
);
1245 dev_dbg(&sep
->pdev
->dev
, "[PID%d] num_pages is (hex) %x\n",
1246 current
->pid
, num_pages
);
1248 /* Allocate array of pages structure pointers */
1249 page_array
= kmalloc(sizeof(struct page
*) * num_pages
, GFP_ATOMIC
);
1254 map_array
= kmalloc(sizeof(struct sep_dma_map
) * num_pages
, GFP_ATOMIC
);
1256 dev_warn(&sep
->pdev
->dev
,
1257 "[PID%d] kmalloc for map_array failed\n",
1260 goto end_function_with_error1
;
1263 lli_array
= kmalloc(sizeof(struct sep_lli_entry
) * num_pages
,
1267 dev_warn(&sep
->pdev
->dev
,
1268 "[PID%d] kmalloc for lli_array failed\n",
1271 goto end_function_with_error2
;
1274 /* Convert the application virtual address into a set of physical */
1275 down_read(¤t
->mm
->mmap_sem
);
1276 result
= get_user_pages(current
, current
->mm
, app_virt_addr
,
1278 ((in_out_flag
== SEP_DRIVER_IN_FLAG
) ? 0 : 1),
1279 0, page_array
, NULL
);
1281 up_read(¤t
->mm
->mmap_sem
);
1283 /* Check the number of pages locked - if not all then exit with error */
1284 if (result
!= num_pages
) {
1285 dev_warn(&sep
->pdev
->dev
,
1286 "[PID%d] not all pages locked by get_user_pages, "
1287 "result 0x%X, num_pages 0x%X\n",
1288 current
->pid
, result
, num_pages
);
1290 goto end_function_with_error3
;
1293 dev_dbg(&sep
->pdev
->dev
, "[PID%d] get_user_pages succeeded\n",
1297 * Fill the array using page array data and
1298 * map the pages - this action will also flush the cache as needed
1300 for (count
= 0; count
< num_pages
; count
++) {
1301 /* Fill the map array */
1302 map_array
[count
].dma_addr
=
1303 dma_map_page(&sep
->pdev
->dev
, page_array
[count
],
1304 0, PAGE_SIZE
, DMA_BIDIRECTIONAL
);
1306 map_array
[count
].size
= PAGE_SIZE
;
1308 /* Fill the lli array entry */
1309 lli_array
[count
].bus_address
= (u32
)map_array
[count
].dma_addr
;
1310 lli_array
[count
].block_size
= PAGE_SIZE
;
1312 dev_dbg(&sep
->pdev
->dev
,
1313 "[PID%d] lli_array[%x].bus_address is %08lx, "
1314 "lli_array[%x].block_size is (hex) %x\n", current
->pid
,
1315 count
, (unsigned long)lli_array
[count
].bus_address
,
1316 count
, lli_array
[count
].block_size
);
1319 /* Check the offset for the first page */
1320 lli_array
[0].bus_address
=
1321 lli_array
[0].bus_address
+ (app_virt_addr
& (~PAGE_MASK
));
1323 /* Check that not all the data is in the first page only */
1324 if ((PAGE_SIZE
- (app_virt_addr
& (~PAGE_MASK
))) >= data_size
)
1325 lli_array
[0].block_size
= data_size
;
1327 lli_array
[0].block_size
=
1328 PAGE_SIZE
- (app_virt_addr
& (~PAGE_MASK
));
1330 dev_dbg(&sep
->pdev
->dev
,
1331 "[PID%d] After check if page 0 has all data\n",
1333 dev_dbg(&sep
->pdev
->dev
,
1334 "[PID%d] lli_array[0].bus_address is (hex) %08lx, "
1335 "lli_array[0].block_size is (hex) %x\n",
1337 (unsigned long)lli_array
[0].bus_address
,
1338 lli_array
[0].block_size
);
1341 /* Check the size of the last page */
1342 if (num_pages
> 1) {
1343 lli_array
[num_pages
- 1].block_size
=
1344 (app_virt_addr
+ data_size
) & (~PAGE_MASK
);
1345 if (lli_array
[num_pages
- 1].block_size
== 0)
1346 lli_array
[num_pages
- 1].block_size
= PAGE_SIZE
;
1348 dev_dbg(&sep
->pdev
->dev
,
1349 "[PID%d] After last page size adjustment\n",
1351 dev_dbg(&sep
->pdev
->dev
,
1352 "[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
1353 "lli_array[%x].block_size is (hex) %x\n",
1356 (unsigned long)lli_array
[num_pages
- 1].bus_address
,
1358 lli_array
[num_pages
- 1].block_size
);
1361 /* Set output params acording to the in_out flag */
1362 if (in_out_flag
== SEP_DRIVER_IN_FLAG
) {
1363 *lli_array_ptr
= lli_array
;
1364 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_num_pages
=
1366 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
=
1368 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_array
=
1370 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_num_entries
=
1372 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].src_sg
= NULL
;
1374 *lli_array_ptr
= lli_array
;
1375 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_num_pages
=
1377 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_page_array
=
1379 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_map_array
=
1381 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].
1382 out_map_num_entries
= num_pages
;
1383 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].dst_sg
= NULL
;
1387 end_function_with_error3
:
1388 /* Free lli array */
1391 end_function_with_error2
:
1394 end_function_with_error1
:
1395 /* Free page array */
1403 * sep_lli_table_secure_dma - get lli array for IMR addresses
1404 * @sep: pointer to struct sep_device
1405 * @app_virt_addr: user memory data buffer
1406 * @data_size: size of data buffer
1407 * @lli_array_ptr: lli array
1408 * @in_out_flag: not used
1409 * @dma_ctx: pointer to struct sep_dma_context
1411 * This function creates lli tables for outputting data to
1412 * IMR memory, which is memory that cannot be accessed by the
1413 * the x86 processor.
1415 static int sep_lli_table_secure_dma(struct sep_device
*sep
,
1418 struct sep_lli_entry
**lli_array_ptr
,
1420 struct sep_dma_context
*dma_ctx
)
1425 /* The the page of the end address of the user space buffer */
1427 /* The page of the start address of the user space buffer */
1429 /* The range in pages */
1432 struct sep_lli_entry
*lli_array
;
1434 /* Set start and end pages and num pages */
1435 end_page
= (app_virt_addr
+ data_size
- 1) >> PAGE_SHIFT
;
1436 start_page
= app_virt_addr
>> PAGE_SHIFT
;
1437 num_pages
= end_page
- start_page
+ 1;
1439 dev_dbg(&sep
->pdev
->dev
, "[PID%d] lock user pages"
1440 " app_virt_addr is %x\n", current
->pid
, app_virt_addr
);
1442 dev_dbg(&sep
->pdev
->dev
, "[PID%d] data_size is (hex) %x\n",
1443 current
->pid
, data_size
);
1444 dev_dbg(&sep
->pdev
->dev
, "[PID%d] start_page is (hex) %x\n",
1445 current
->pid
, start_page
);
1446 dev_dbg(&sep
->pdev
->dev
, "[PID%d] end_page is (hex) %x\n",
1447 current
->pid
, end_page
);
1448 dev_dbg(&sep
->pdev
->dev
, "[PID%d] num_pages is (hex) %x\n",
1449 current
->pid
, num_pages
);
1451 lli_array
= kmalloc(sizeof(struct sep_lli_entry
) * num_pages
,
1455 dev_warn(&sep
->pdev
->dev
,
1456 "[PID%d] kmalloc for lli_array failed\n",
1462 * Fill the lli_array
1464 start_page
= start_page
<< PAGE_SHIFT
;
1465 for (count
= 0; count
< num_pages
; count
++) {
1466 /* Fill the lli array entry */
1467 lli_array
[count
].bus_address
= start_page
;
1468 lli_array
[count
].block_size
= PAGE_SIZE
;
1470 start_page
+= PAGE_SIZE
;
1472 dev_dbg(&sep
->pdev
->dev
,
1473 "[PID%d] lli_array[%x].bus_address is %08lx, "
1474 "lli_array[%x].block_size is (hex) %x\n",
1476 count
, (unsigned long)lli_array
[count
].bus_address
,
1477 count
, lli_array
[count
].block_size
);
1480 /* Check the offset for the first page */
1481 lli_array
[0].bus_address
=
1482 lli_array
[0].bus_address
+ (app_virt_addr
& (~PAGE_MASK
));
1484 /* Check that not all the data is in the first page only */
1485 if ((PAGE_SIZE
- (app_virt_addr
& (~PAGE_MASK
))) >= data_size
)
1486 lli_array
[0].block_size
= data_size
;
1488 lli_array
[0].block_size
=
1489 PAGE_SIZE
- (app_virt_addr
& (~PAGE_MASK
));
1491 dev_dbg(&sep
->pdev
->dev
,
1492 "[PID%d] After check if page 0 has all data\n"
1493 "lli_array[0].bus_address is (hex) %08lx, "
1494 "lli_array[0].block_size is (hex) %x\n",
1496 (unsigned long)lli_array
[0].bus_address
,
1497 lli_array
[0].block_size
);
1499 /* Check the size of the last page */
1500 if (num_pages
> 1) {
1501 lli_array
[num_pages
- 1].block_size
=
1502 (app_virt_addr
+ data_size
) & (~PAGE_MASK
);
1503 if (lli_array
[num_pages
- 1].block_size
== 0)
1504 lli_array
[num_pages
- 1].block_size
= PAGE_SIZE
;
1506 dev_dbg(&sep
->pdev
->dev
,
1507 "[PID%d] After last page size adjustment\n"
1508 "lli_array[%x].bus_address is (hex) %08lx, "
1509 "lli_array[%x].block_size is (hex) %x\n",
1510 current
->pid
, num_pages
- 1,
1511 (unsigned long)lli_array
[num_pages
- 1].bus_address
,
1513 lli_array
[num_pages
- 1].block_size
);
1515 *lli_array_ptr
= lli_array
;
1516 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_num_pages
= num_pages
;
1517 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_page_array
= NULL
;
1518 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_map_array
= NULL
;
1519 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_map_num_entries
= 0;
1525 * sep_calculate_lli_table_max_size - size the LLI table
1526 * @sep: pointer to struct sep_device
1528 * @num_array_entries
1531 * This function calculates the size of data that can be inserted into
1532 * the lli table from this array, such that either the table is full
1533 * (all entries are entered), or there are no more entries in the
1536 static u32
sep_calculate_lli_table_max_size(struct sep_device
*sep
,
1537 struct sep_lli_entry
*lli_in_array_ptr
,
1538 u32 num_array_entries
,
1539 u32
*last_table_flag
)
1542 /* Table data size */
1543 u32 table_data_size
= 0;
1544 /* Data size for the next table */
1545 u32 next_table_data_size
;
1547 *last_table_flag
= 0;
1550 * Calculate the data in the out lli table till we fill the whole
1551 * table or till the data has ended
1554 (counter
< (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
- 1)) &&
1555 (counter
< num_array_entries
); counter
++)
1556 table_data_size
+= lli_in_array_ptr
[counter
].block_size
;
1559 * Check if we reached the last entry,
1560 * meaning this ia the last table to build,
1561 * and no need to check the block alignment
1563 if (counter
== num_array_entries
) {
1564 /* Set the last table flag */
1565 *last_table_flag
= 1;
1570 * Calculate the data size of the next table.
1571 * Stop if no entries left or if data size is more the DMA restriction
1573 next_table_data_size
= 0;
1574 for (; counter
< num_array_entries
; counter
++) {
1575 next_table_data_size
+= lli_in_array_ptr
[counter
].block_size
;
1576 if (next_table_data_size
>= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE
)
1581 * Check if the next table data size is less then DMA rstriction.
1582 * if it is - recalculate the current table size, so that the next
1583 * table data size will be adaquete for DMA
1585 if (next_table_data_size
&&
1586 next_table_data_size
< SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE
)
1588 table_data_size
-= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE
-
1589 next_table_data_size
);
1592 return table_data_size
;
1596 * sep_build_lli_table - build an lli array for the given table
1597 * @sep: pointer to struct sep_device
1598 * @lli_array_ptr: pointer to lli array
1599 * @lli_table_ptr: pointer to lli table
1600 * @num_processed_entries_ptr: pointer to number of entries
1601 * @num_table_entries_ptr: pointer to number of tables
1602 * @table_data_size: total data size
1604 * Builds ant lli table from the lli_array according to
1605 * the given size of data
1607 static void sep_build_lli_table(struct sep_device
*sep
,
1608 struct sep_lli_entry
*lli_array_ptr
,
1609 struct sep_lli_entry
*lli_table_ptr
,
1610 u32
*num_processed_entries_ptr
,
1611 u32
*num_table_entries_ptr
,
1612 u32 table_data_size
)
1614 /* Current table data size */
1615 u32 curr_table_data_size
;
1616 /* Counter of lli array entry */
1619 /* Init current table data size and lli array entry counter */
1620 curr_table_data_size
= 0;
1622 *num_table_entries_ptr
= 1;
1624 dev_dbg(&sep
->pdev
->dev
,
1625 "[PID%d] build lli table table_data_size: (hex) %x\n",
1626 current
->pid
, table_data_size
);
1628 /* Fill the table till table size reaches the needed amount */
1629 while (curr_table_data_size
< table_data_size
) {
1630 /* Update the number of entries in table */
1631 (*num_table_entries_ptr
)++;
1633 lli_table_ptr
->bus_address
=
1634 cpu_to_le32(lli_array_ptr
[array_counter
].bus_address
);
1636 lli_table_ptr
->block_size
=
1637 cpu_to_le32(lli_array_ptr
[array_counter
].block_size
);
1639 curr_table_data_size
+= lli_array_ptr
[array_counter
].block_size
;
1641 dev_dbg(&sep
->pdev
->dev
,
1642 "[PID%d] lli_table_ptr is %p\n",
1643 current
->pid
, lli_table_ptr
);
1644 dev_dbg(&sep
->pdev
->dev
,
1645 "[PID%d] lli_table_ptr->bus_address: %08lx\n",
1647 (unsigned long)lli_table_ptr
->bus_address
);
1649 dev_dbg(&sep
->pdev
->dev
,
1650 "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
1651 current
->pid
, lli_table_ptr
->block_size
);
1653 /* Check for overflow of the table data */
1654 if (curr_table_data_size
> table_data_size
) {
1655 dev_dbg(&sep
->pdev
->dev
,
1656 "[PID%d] curr_table_data_size too large\n",
1659 /* Update the size of block in the table */
1660 lli_table_ptr
->block_size
=
1661 cpu_to_le32(lli_table_ptr
->block_size
) -
1662 (curr_table_data_size
- table_data_size
);
1664 /* Update the physical address in the lli array */
1665 lli_array_ptr
[array_counter
].bus_address
+=
1666 cpu_to_le32(lli_table_ptr
->block_size
);
1668 /* Update the block size left in the lli array */
1669 lli_array_ptr
[array_counter
].block_size
=
1670 (curr_table_data_size
- table_data_size
);
1672 /* Advance to the next entry in the lli_array */
1675 dev_dbg(&sep
->pdev
->dev
,
1676 "[PID%d] lli_table_ptr->bus_address is %08lx\n",
1678 (unsigned long)lli_table_ptr
->bus_address
);
1679 dev_dbg(&sep
->pdev
->dev
,
1680 "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
1682 lli_table_ptr
->block_size
);
1684 /* Move to the next entry in table */
1688 /* Set the info entry to default */
1689 lli_table_ptr
->bus_address
= 0xffffffff;
1690 lli_table_ptr
->block_size
= 0;
1692 /* Set the output parameter */
1693 *num_processed_entries_ptr
+= array_counter
;
1698 * sep_shared_area_virt_to_bus - map shared area to bus address
1699 * @sep: pointer to struct sep_device
1700 * @virt_address: virtual address to convert
1702 * This functions returns the physical address inside shared area according
1703 * to the virtual address. It can be either on the externa RAM device
1704 * (ioremapped), or on the system RAM
1705 * This implementation is for the external RAM
1707 static dma_addr_t
sep_shared_area_virt_to_bus(struct sep_device
*sep
,
1710 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sh virt to phys v %p\n",
1711 current
->pid
, virt_address
);
1712 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sh virt to phys p %08lx\n",
1715 sep
->shared_bus
+ (virt_address
- sep
->shared_addr
));
1717 return sep
->shared_bus
+ (size_t)(virt_address
- sep
->shared_addr
);
1721 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
1722 * @sep: pointer to struct sep_device
1723 * @bus_address: bus address to convert
1725 * This functions returns the virtual address inside shared area
1726 * according to the physical address. It can be either on the
1727 * externa RAM device (ioremapped), or on the system RAM
1728 * This implementation is for the external RAM
1730 static void *sep_shared_area_bus_to_virt(struct sep_device
*sep
,
1731 dma_addr_t bus_address
)
1733 dev_dbg(&sep
->pdev
->dev
, "[PID%d] shared bus to virt b=%lx v=%lx\n",
1735 (unsigned long)bus_address
, (unsigned long)(sep
->shared_addr
+
1736 (size_t)(bus_address
- sep
->shared_bus
)));
1738 return sep
->shared_addr
+ (size_t)(bus_address
- sep
->shared_bus
);
1742 * sep_debug_print_lli_tables - dump LLI table
1743 * @sep: pointer to struct sep_device
1744 * @lli_table_ptr: pointer to sep_lli_entry
1745 * @num_table_entries: number of entries
1746 * @table_data_size: total data size
1748 * Walk the the list of the print created tables and print all the data
1750 static void sep_debug_print_lli_tables(struct sep_device
*sep
,
1751 struct sep_lli_entry
*lli_table_ptr
,
1752 unsigned long num_table_entries
,
1753 unsigned long table_data_size
)
1756 unsigned long table_count
= 1;
1757 unsigned long entries_count
= 0;
1759 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_debug_print_lli_tables start\n",
1761 if (num_table_entries
== 0) {
1762 dev_dbg(&sep
->pdev
->dev
, "[PID%d] no table to print\n",
1767 while ((unsigned long) lli_table_ptr
->bus_address
!= 0xffffffff) {
1768 dev_dbg(&sep
->pdev
->dev
,
1769 "[PID%d] lli table %08lx, "
1770 "table_data_size is (hex) %lx\n",
1771 current
->pid
, table_count
, table_data_size
);
1772 dev_dbg(&sep
->pdev
->dev
,
1773 "[PID%d] num_table_entries is (hex) %lx\n",
1774 current
->pid
, num_table_entries
);
1776 /* Print entries of the table (without info entry) */
1777 for (entries_count
= 0; entries_count
< num_table_entries
;
1778 entries_count
++, lli_table_ptr
++) {
1780 dev_dbg(&sep
->pdev
->dev
,
1781 "[PID%d] lli_table_ptr address is %08lx\n",
1783 (unsigned long) lli_table_ptr
);
1785 dev_dbg(&sep
->pdev
->dev
,
1786 "[PID%d] phys address is %08lx "
1787 "block size is (hex) %x\n", current
->pid
,
1788 (unsigned long)lli_table_ptr
->bus_address
,
1789 lli_table_ptr
->block_size
);
1792 /* Point to the info entry */
1795 dev_dbg(&sep
->pdev
->dev
,
1796 "[PID%d] phys lli_table_ptr->block_size "
1799 lli_table_ptr
->block_size
);
1801 dev_dbg(&sep
->pdev
->dev
,
1802 "[PID%d] phys lli_table_ptr->physical_address "
1805 (unsigned long)lli_table_ptr
->bus_address
);
1808 table_data_size
= lli_table_ptr
->block_size
& 0xffffff;
1809 num_table_entries
= (lli_table_ptr
->block_size
>> 24) & 0xff;
1811 dev_dbg(&sep
->pdev
->dev
,
1812 "[PID%d] phys table_data_size is "
1813 "(hex) %lx num_table_entries is"
1814 " %lx bus_address is%lx\n",
1818 (unsigned long)lli_table_ptr
->bus_address
);
1820 if ((unsigned long)lli_table_ptr
->bus_address
!= 0xffffffff)
1821 lli_table_ptr
= (struct sep_lli_entry
*)
1822 sep_shared_bus_to_virt(sep
,
1823 (unsigned long)lli_table_ptr
->bus_address
);
1827 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_debug_print_lli_tables end\n",
1834 * sep_prepare_empty_lli_table - create a blank LLI table
1835 * @sep: pointer to struct sep_device
1836 * @lli_table_addr_ptr: pointer to lli table
1837 * @num_entries_ptr: pointer to number of entries
1838 * @table_data_size_ptr: point to table data size
1839 * @dmatables_region: Optional buffer for DMA tables
1840 * @dma_ctx: DMA context
1842 * This function creates empty lli tables when there is no data
1844 static void sep_prepare_empty_lli_table(struct sep_device
*sep
,
1845 dma_addr_t
*lli_table_addr_ptr
,
1846 u32
*num_entries_ptr
,
1847 u32
*table_data_size_ptr
,
1848 void **dmatables_region
,
1849 struct sep_dma_context
*dma_ctx
)
1851 struct sep_lli_entry
*lli_table_ptr
;
1853 /* Find the area for new table */
1855 (struct sep_lli_entry
*)(sep
->shared_addr
+
1856 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
1857 dma_ctx
->num_lli_tables_created
* sizeof(struct sep_lli_entry
) *
1858 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
);
1860 if (dmatables_region
&& *dmatables_region
)
1861 lli_table_ptr
= *dmatables_region
;
1863 lli_table_ptr
->bus_address
= 0;
1864 lli_table_ptr
->block_size
= 0;
1867 lli_table_ptr
->bus_address
= 0xFFFFFFFF;
1868 lli_table_ptr
->block_size
= 0;
1870 /* Set the output parameter value */
1871 *lli_table_addr_ptr
= sep
->shared_bus
+
1872 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
1873 dma_ctx
->num_lli_tables_created
*
1874 sizeof(struct sep_lli_entry
) *
1875 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
1877 /* Set the num of entries and table data size for empty table */
1878 *num_entries_ptr
= 2;
1879 *table_data_size_ptr
= 0;
1881 /* Update the number of created tables */
1882 dma_ctx
->num_lli_tables_created
++;
1886 * sep_prepare_input_dma_table - prepare input DMA mappings
1887 * @sep: pointer to struct sep_device
1892 * @table_data_size_ptr:
1893 * @is_kva: set for kernel data (kernel cryptio call)
1895 * This function prepares only input DMA table for synhronic symmetric
1897 * Note that all bus addresses that are passed to the SEP
1898 * are in 32 bit format; the SEP is a 32 bit device
1900 static int sep_prepare_input_dma_table(struct sep_device
*sep
,
1901 unsigned long app_virt_addr
,
1904 dma_addr_t
*lli_table_ptr
,
1905 u32
*num_entries_ptr
,
1906 u32
*table_data_size_ptr
,
1908 void **dmatables_region
,
1909 struct sep_dma_context
*dma_ctx
1913 /* Pointer to the info entry of the table - the last entry */
1914 struct sep_lli_entry
*info_entry_ptr
;
1915 /* Array of pointers to page */
1916 struct sep_lli_entry
*lli_array_ptr
;
1917 /* Points to the first entry to be processed in the lli_in_array */
1918 u32 current_entry
= 0;
1919 /* Num entries in the virtual buffer */
1920 u32 sep_lli_entries
= 0;
1921 /* Lli table pointer */
1922 struct sep_lli_entry
*in_lli_table_ptr
;
1923 /* The total data in one table */
1924 u32 table_data_size
= 0;
1925 /* Flag for last table */
1926 u32 last_table_flag
= 0;
1927 /* Number of entries in lli table */
1928 u32 num_entries_in_table
= 0;
1929 /* Next table address */
1930 void *lli_table_alloc_addr
= NULL
;
1931 void *dma_lli_table_alloc_addr
= NULL
;
1932 void *dma_in_lli_table_ptr
= NULL
;
1934 dev_dbg(&sep
->pdev
->dev
, "[PID%d] prepare intput dma "
1935 "tbl data size: (hex) %x\n",
1936 current
->pid
, data_size
);
1938 dev_dbg(&sep
->pdev
->dev
, "[PID%d] block_size is (hex) %x\n",
1939 current
->pid
, block_size
);
1941 /* Initialize the pages pointers */
1942 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
= NULL
;
1943 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_num_pages
= 0;
1945 /* Set the kernel address for first table to be allocated */
1946 lli_table_alloc_addr
= (void *)(sep
->shared_addr
+
1947 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
1948 dma_ctx
->num_lli_tables_created
* sizeof(struct sep_lli_entry
) *
1949 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
);
1951 if (data_size
== 0) {
1952 if (dmatables_region
) {
1953 error
= sep_allocate_dmatables_region(sep
,
1960 /* Special case - create meptu table - 2 entries, zero data */
1961 sep_prepare_empty_lli_table(sep
, lli_table_ptr
,
1962 num_entries_ptr
, table_data_size_ptr
,
1963 dmatables_region
, dma_ctx
);
1964 goto update_dcb_counter
;
1967 /* Check if the pages are in Kernel Virtual Address layout */
1969 error
= sep_lock_kernel_pages(sep
, app_virt_addr
,
1970 data_size
, &lli_array_ptr
, SEP_DRIVER_IN_FLAG
,
1974 * Lock the pages of the user buffer
1975 * and translate them to pages
1977 error
= sep_lock_user_pages(sep
, app_virt_addr
,
1978 data_size
, &lli_array_ptr
, SEP_DRIVER_IN_FLAG
,
1984 dev_dbg(&sep
->pdev
->dev
,
1985 "[PID%d] output sep_in_num_pages is (hex) %x\n",
1987 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_num_pages
);
1990 info_entry_ptr
= NULL
;
1993 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_num_pages
;
1995 dma_lli_table_alloc_addr
= lli_table_alloc_addr
;
1996 if (dmatables_region
) {
1997 error
= sep_allocate_dmatables_region(sep
,
2003 lli_table_alloc_addr
= *dmatables_region
;
2006 /* Loop till all the entries in in array are processed */
2007 while (current_entry
< sep_lli_entries
) {
2009 /* Set the new input and output tables */
2011 (struct sep_lli_entry
*)lli_table_alloc_addr
;
2012 dma_in_lli_table_ptr
=
2013 (struct sep_lli_entry
*)dma_lli_table_alloc_addr
;
2015 lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
2016 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
2017 dma_lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
2018 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
2020 if (dma_lli_table_alloc_addr
>
2021 ((void *)sep
->shared_addr
+
2022 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
2023 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
)) {
2026 goto end_function_error
;
2030 /* Update the number of created tables */
2031 dma_ctx
->num_lli_tables_created
++;
2033 /* Calculate the maximum size of data for input table */
2034 table_data_size
= sep_calculate_lli_table_max_size(sep
,
2035 &lli_array_ptr
[current_entry
],
2036 (sep_lli_entries
- current_entry
),
2040 * If this is not the last table -
2041 * then allign it to the block size
2043 if (!last_table_flag
)
2045 (table_data_size
/ block_size
) * block_size
;
2047 dev_dbg(&sep
->pdev
->dev
,
2048 "[PID%d] output table_data_size is (hex) %x\n",
2052 /* Construct input lli table */
2053 sep_build_lli_table(sep
, &lli_array_ptr
[current_entry
],
2055 ¤t_entry
, &num_entries_in_table
, table_data_size
);
2057 if (info_entry_ptr
== NULL
) {
2059 /* Set the output parameters to physical addresses */
2060 *lli_table_ptr
= sep_shared_area_virt_to_bus(sep
,
2061 dma_in_lli_table_ptr
);
2062 *num_entries_ptr
= num_entries_in_table
;
2063 *table_data_size_ptr
= table_data_size
;
2065 dev_dbg(&sep
->pdev
->dev
,
2066 "[PID%d] output lli_table_in_ptr is %08lx\n",
2068 (unsigned long)*lli_table_ptr
);
2071 /* Update the info entry of the previous in table */
2072 info_entry_ptr
->bus_address
=
2073 sep_shared_area_virt_to_bus(sep
,
2074 dma_in_lli_table_ptr
);
2075 info_entry_ptr
->block_size
=
2076 ((num_entries_in_table
) << 24) |
2079 /* Save the pointer to the info entry of the current tables */
2080 info_entry_ptr
= in_lli_table_ptr
+ num_entries_in_table
- 1;
2082 /* Print input tables */
2083 if (!dmatables_region
) {
2084 sep_debug_print_lli_tables(sep
, (struct sep_lli_entry
*)
2085 sep_shared_area_bus_to_virt(sep
, *lli_table_ptr
),
2086 *num_entries_ptr
, *table_data_size_ptr
);
2089 /* The array of the pages */
2090 kfree(lli_array_ptr
);
2093 /* Update DCB counter */
2094 dma_ctx
->nr_dcb_creat
++;
2098 /* Free all the allocated resources */
2099 kfree(dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_array
);
2100 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_array
= NULL
;
2101 kfree(lli_array_ptr
);
2102 kfree(dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
);
2103 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
= NULL
;
2111 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
2112 * @sep: pointer to struct sep_device
2114 * @sep_in_lli_entries:
2116 * @sep_out_lli_entries
2119 * @lli_table_out_ptr
2120 * @in_num_entries_ptr
2121 * @out_num_entries_ptr
2122 * @table_data_size_ptr
2124 * This function creates the input and output DMA tables for
2125 * symmetric operations (AES/DES) according to the block
2126 * size from LLI arays
2127 * Note that all bus addresses that are passed to the SEP
2128 * are in 32 bit format; the SEP is a 32 bit device
2130 static int sep_construct_dma_tables_from_lli(
2131 struct sep_device
*sep
,
2132 struct sep_lli_entry
*lli_in_array
,
2133 u32 sep_in_lli_entries
,
2134 struct sep_lli_entry
*lli_out_array
,
2135 u32 sep_out_lli_entries
,
2137 dma_addr_t
*lli_table_in_ptr
,
2138 dma_addr_t
*lli_table_out_ptr
,
2139 u32
*in_num_entries_ptr
,
2140 u32
*out_num_entries_ptr
,
2141 u32
*table_data_size_ptr
,
2142 void **dmatables_region
,
2143 struct sep_dma_context
*dma_ctx
)
2145 /* Points to the area where next lli table can be allocated */
2146 void *lli_table_alloc_addr
= NULL
;
2148 * Points to the area in shared region where next lli table
2151 void *dma_lli_table_alloc_addr
= NULL
;
2152 /* Input lli table in dmatables_region or shared region */
2153 struct sep_lli_entry
*in_lli_table_ptr
= NULL
;
2154 /* Input lli table location in the shared region */
2155 struct sep_lli_entry
*dma_in_lli_table_ptr
= NULL
;
2156 /* Output lli table in dmatables_region or shared region */
2157 struct sep_lli_entry
*out_lli_table_ptr
= NULL
;
2158 /* Output lli table location in the shared region */
2159 struct sep_lli_entry
*dma_out_lli_table_ptr
= NULL
;
2160 /* Pointer to the info entry of the table - the last entry */
2161 struct sep_lli_entry
*info_in_entry_ptr
= NULL
;
2162 /* Pointer to the info entry of the table - the last entry */
2163 struct sep_lli_entry
*info_out_entry_ptr
= NULL
;
2164 /* Points to the first entry to be processed in the lli_in_array */
2165 u32 current_in_entry
= 0;
2166 /* Points to the first entry to be processed in the lli_out_array */
2167 u32 current_out_entry
= 0;
2168 /* Max size of the input table */
2169 u32 in_table_data_size
= 0;
2170 /* Max size of the output table */
2171 u32 out_table_data_size
= 0;
2172 /* Flag te signifies if this is the last tables build */
2173 u32 last_table_flag
= 0;
2174 /* The data size that should be in table */
2175 u32 table_data_size
= 0;
2176 /* Number of etnries in the input table */
2177 u32 num_entries_in_table
= 0;
2178 /* Number of etnries in the output table */
2179 u32 num_entries_out_table
= 0;
2182 dev_warn(&sep
->pdev
->dev
, "DMA context uninitialized\n");
2186 /* Initiate to point after the message area */
2187 lli_table_alloc_addr
= (void *)(sep
->shared_addr
+
2188 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
2189 (dma_ctx
->num_lli_tables_created
*
2190 (sizeof(struct sep_lli_entry
) *
2191 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
)));
2192 dma_lli_table_alloc_addr
= lli_table_alloc_addr
;
2194 if (dmatables_region
) {
2195 /* 2 for both in+out table */
2196 if (sep_allocate_dmatables_region(sep
,
2199 2*sep_in_lli_entries
))
2201 lli_table_alloc_addr
= *dmatables_region
;
2204 /* Loop till all the entries in in array are not processed */
2205 while (current_in_entry
< sep_in_lli_entries
) {
2206 /* Set the new input and output tables */
2208 (struct sep_lli_entry
*)lli_table_alloc_addr
;
2209 dma_in_lli_table_ptr
=
2210 (struct sep_lli_entry
*)dma_lli_table_alloc_addr
;
2212 lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
2213 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
2214 dma_lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
2215 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
2217 /* Set the first output tables */
2219 (struct sep_lli_entry
*)lli_table_alloc_addr
;
2220 dma_out_lli_table_ptr
=
2221 (struct sep_lli_entry
*)dma_lli_table_alloc_addr
;
2223 /* Check if the DMA table area limit was overrun */
2224 if ((dma_lli_table_alloc_addr
+ sizeof(struct sep_lli_entry
) *
2225 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
) >
2226 ((void *)sep
->shared_addr
+
2227 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
2228 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
)) {
2230 dev_warn(&sep
->pdev
->dev
, "dma table limit overrun\n");
2234 /* Update the number of the lli tables created */
2235 dma_ctx
->num_lli_tables_created
+= 2;
2237 lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
2238 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
2239 dma_lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
2240 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
2242 /* Calculate the maximum size of data for input table */
2243 in_table_data_size
=
2244 sep_calculate_lli_table_max_size(sep
,
2245 &lli_in_array
[current_in_entry
],
2246 (sep_in_lli_entries
- current_in_entry
),
2249 /* Calculate the maximum size of data for output table */
2250 out_table_data_size
=
2251 sep_calculate_lli_table_max_size(sep
,
2252 &lli_out_array
[current_out_entry
],
2253 (sep_out_lli_entries
- current_out_entry
),
2256 if (!last_table_flag
) {
2257 in_table_data_size
= (in_table_data_size
/
2258 block_size
) * block_size
;
2259 out_table_data_size
= (out_table_data_size
/
2260 block_size
) * block_size
;
2263 table_data_size
= in_table_data_size
;
2264 if (table_data_size
> out_table_data_size
)
2265 table_data_size
= out_table_data_size
;
2267 dev_dbg(&sep
->pdev
->dev
,
2268 "[PID%d] construct tables from lli"
2269 " in_table_data_size is (hex) %x\n", current
->pid
,
2270 in_table_data_size
);
2272 dev_dbg(&sep
->pdev
->dev
,
2273 "[PID%d] construct tables from lli"
2274 "out_table_data_size is (hex) %x\n", current
->pid
,
2275 out_table_data_size
);
2277 /* Construct input lli table */
2278 sep_build_lli_table(sep
, &lli_in_array
[current_in_entry
],
2281 &num_entries_in_table
,
2284 /* Construct output lli table */
2285 sep_build_lli_table(sep
, &lli_out_array
[current_out_entry
],
2288 &num_entries_out_table
,
2291 /* If info entry is null - this is the first table built */
2292 if (info_in_entry_ptr
== NULL
) {
2293 /* Set the output parameters to physical addresses */
2295 sep_shared_area_virt_to_bus(sep
, dma_in_lli_table_ptr
);
2297 *in_num_entries_ptr
= num_entries_in_table
;
2299 *lli_table_out_ptr
=
2300 sep_shared_area_virt_to_bus(sep
,
2301 dma_out_lli_table_ptr
);
2303 *out_num_entries_ptr
= num_entries_out_table
;
2304 *table_data_size_ptr
= table_data_size
;
2306 dev_dbg(&sep
->pdev
->dev
,
2307 "[PID%d] output lli_table_in_ptr is %08lx\n",
2309 (unsigned long)*lli_table_in_ptr
);
2310 dev_dbg(&sep
->pdev
->dev
,
2311 "[PID%d] output lli_table_out_ptr is %08lx\n",
2313 (unsigned long)*lli_table_out_ptr
);
2315 /* Update the info entry of the previous in table */
2316 info_in_entry_ptr
->bus_address
=
2317 sep_shared_area_virt_to_bus(sep
,
2318 dma_in_lli_table_ptr
);
2320 info_in_entry_ptr
->block_size
=
2321 ((num_entries_in_table
) << 24) |
2324 /* Update the info entry of the previous in table */
2325 info_out_entry_ptr
->bus_address
=
2326 sep_shared_area_virt_to_bus(sep
,
2327 dma_out_lli_table_ptr
);
2329 info_out_entry_ptr
->block_size
=
2330 ((num_entries_out_table
) << 24) |
2333 dev_dbg(&sep
->pdev
->dev
,
2334 "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2336 (unsigned long)info_in_entry_ptr
->bus_address
,
2337 info_in_entry_ptr
->block_size
);
2339 dev_dbg(&sep
->pdev
->dev
,
2340 "[PID%d] output lli_table_out_ptr:"
2343 (unsigned long)info_out_entry_ptr
->bus_address
,
2344 info_out_entry_ptr
->block_size
);
2347 /* Save the pointer to the info entry of the current tables */
2348 info_in_entry_ptr
= in_lli_table_ptr
+
2349 num_entries_in_table
- 1;
2350 info_out_entry_ptr
= out_lli_table_ptr
+
2351 num_entries_out_table
- 1;
2353 dev_dbg(&sep
->pdev
->dev
,
2354 "[PID%d] output num_entries_out_table is %x\n",
2356 (u32
)num_entries_out_table
);
2357 dev_dbg(&sep
->pdev
->dev
,
2358 "[PID%d] output info_in_entry_ptr is %lx\n",
2360 (unsigned long)info_in_entry_ptr
);
2361 dev_dbg(&sep
->pdev
->dev
,
2362 "[PID%d] output info_out_entry_ptr is %lx\n",
2364 (unsigned long)info_out_entry_ptr
);
2367 /* Print input tables */
2368 if (!dmatables_region
) {
2369 sep_debug_print_lli_tables(
2371 (struct sep_lli_entry
*)
2372 sep_shared_area_bus_to_virt(sep
, *lli_table_in_ptr
),
2373 *in_num_entries_ptr
,
2374 *table_data_size_ptr
);
2377 /* Print output tables */
2378 if (!dmatables_region
) {
2379 sep_debug_print_lli_tables(
2381 (struct sep_lli_entry
*)
2382 sep_shared_area_bus_to_virt(sep
, *lli_table_out_ptr
),
2383 *out_num_entries_ptr
,
2384 *table_data_size_ptr
);
2391 * sep_prepare_input_output_dma_table - prepare DMA I/O table
2392 * @app_virt_in_addr:
2393 * @app_virt_out_addr:
2396 * @lli_table_in_ptr:
2397 * @lli_table_out_ptr:
2398 * @in_num_entries_ptr:
2399 * @out_num_entries_ptr:
2400 * @table_data_size_ptr:
2401 * @is_kva: set for kernel data; used only for kernel crypto module
2403 * This function builds input and output DMA tables for synchronous
2404 * symmetric operations (AES, DES, HASH). It also checks that each table
2405 * is of the modular block size
2406 * Note that all bus addresses that are passed to the SEP
2407 * are in 32 bit format; the SEP is a 32 bit device
2409 static int sep_prepare_input_output_dma_table(struct sep_device
*sep
,
2410 unsigned long app_virt_in_addr
,
2411 unsigned long app_virt_out_addr
,
2414 dma_addr_t
*lli_table_in_ptr
,
2415 dma_addr_t
*lli_table_out_ptr
,
2416 u32
*in_num_entries_ptr
,
2417 u32
*out_num_entries_ptr
,
2418 u32
*table_data_size_ptr
,
2420 void **dmatables_region
,
2421 struct sep_dma_context
*dma_ctx
)
2425 /* Array of pointers of page */
2426 struct sep_lli_entry
*lli_in_array
;
2427 /* Array of pointers of page */
2428 struct sep_lli_entry
*lli_out_array
;
2435 if (data_size
== 0) {
2436 /* Prepare empty table for input and output */
2437 if (dmatables_region
) {
2438 error
= sep_allocate_dmatables_region(
2446 sep_prepare_empty_lli_table(sep
, lli_table_in_ptr
,
2447 in_num_entries_ptr
, table_data_size_ptr
,
2448 dmatables_region
, dma_ctx
);
2450 sep_prepare_empty_lli_table(sep
, lli_table_out_ptr
,
2451 out_num_entries_ptr
, table_data_size_ptr
,
2452 dmatables_region
, dma_ctx
);
2454 goto update_dcb_counter
;
2457 /* Initialize the pages pointers */
2458 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
= NULL
;
2459 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_page_array
= NULL
;
2461 /* Lock the pages of the buffer and translate them to pages */
2462 if (is_kva
== true) {
2463 dev_dbg(&sep
->pdev
->dev
, "[PID%d] Locking kernel input pages\n",
2465 error
= sep_lock_kernel_pages(sep
, app_virt_in_addr
,
2466 data_size
, &lli_in_array
, SEP_DRIVER_IN_FLAG
,
2469 dev_warn(&sep
->pdev
->dev
,
2470 "[PID%d] sep_lock_kernel_pages for input "
2471 "virtual buffer failed\n", current
->pid
);
2476 dev_dbg(&sep
->pdev
->dev
, "[PID%d] Locking kernel output pages\n",
2478 error
= sep_lock_kernel_pages(sep
, app_virt_out_addr
,
2479 data_size
, &lli_out_array
, SEP_DRIVER_OUT_FLAG
,
2483 dev_warn(&sep
->pdev
->dev
,
2484 "[PID%d] sep_lock_kernel_pages for output "
2485 "virtual buffer failed\n", current
->pid
);
2487 goto end_function_free_lli_in
;
2493 dev_dbg(&sep
->pdev
->dev
, "[PID%d] Locking user input pages\n",
2495 error
= sep_lock_user_pages(sep
, app_virt_in_addr
,
2496 data_size
, &lli_in_array
, SEP_DRIVER_IN_FLAG
,
2499 dev_warn(&sep
->pdev
->dev
,
2500 "[PID%d] sep_lock_user_pages for input "
2501 "virtual buffer failed\n", current
->pid
);
2506 if (dma_ctx
->secure_dma
== true) {
2507 /* secure_dma requires use of non accessible memory */
2508 dev_dbg(&sep
->pdev
->dev
, "[PID%d] in secure_dma\n",
2510 error
= sep_lli_table_secure_dma(sep
,
2511 app_virt_out_addr
, data_size
, &lli_out_array
,
2512 SEP_DRIVER_OUT_FLAG
, dma_ctx
);
2514 dev_warn(&sep
->pdev
->dev
,
2515 "[PID%d] secure dma table setup "
2516 " for output virtual buffer failed\n",
2519 goto end_function_free_lli_in
;
2522 /* For normal, non-secure dma */
2523 dev_dbg(&sep
->pdev
->dev
, "[PID%d] not in secure_dma\n",
2526 dev_dbg(&sep
->pdev
->dev
,
2527 "[PID%d] Locking user output pages\n",
2530 error
= sep_lock_user_pages(sep
, app_virt_out_addr
,
2531 data_size
, &lli_out_array
, SEP_DRIVER_OUT_FLAG
,
2535 dev_warn(&sep
->pdev
->dev
,
2536 "[PID%d] sep_lock_user_pages"
2537 " for output virtual buffer failed\n",
2540 goto end_function_free_lli_in
;
2545 dev_dbg(&sep
->pdev
->dev
, "[PID%d] After lock; prep input output dma "
2546 "table sep_in_num_pages is (hex) %x\n", current
->pid
,
2547 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_num_pages
);
2549 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_out_num_pages is (hex) %x\n",
2551 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_num_pages
);
2553 dev_dbg(&sep
->pdev
->dev
, "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP"
2554 " is (hex) %x\n", current
->pid
,
2555 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
);
2557 /* Call the function that creates table from the lli arrays */
2558 dev_dbg(&sep
->pdev
->dev
, "[PID%d] calling create table from lli\n",
2560 error
= sep_construct_dma_tables_from_lli(
2562 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].
2565 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].
2567 block_size
, lli_table_in_ptr
, lli_table_out_ptr
,
2568 in_num_entries_ptr
, out_num_entries_ptr
,
2569 table_data_size_ptr
, dmatables_region
, dma_ctx
);
2572 dev_warn(&sep
->pdev
->dev
,
2573 "[PID%d] sep_construct_dma_tables_from_lli failed\n",
2575 goto end_function_with_error
;
2578 kfree(lli_out_array
);
2579 kfree(lli_in_array
);
2582 /* Update DCB counter */
2583 dma_ctx
->nr_dcb_creat
++;
2587 end_function_with_error
:
2588 kfree(dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_map_array
);
2589 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_map_array
= NULL
;
2590 kfree(dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_page_array
);
2591 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_page_array
= NULL
;
2592 kfree(lli_out_array
);
2595 end_function_free_lli_in
:
2596 kfree(dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_array
);
2597 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_array
= NULL
;
2598 kfree(dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
);
2599 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
= NULL
;
2600 kfree(lli_in_array
);
2609 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2610 * @app_in_address: unsigned long; for data buffer in (user space)
2611 * @app_out_address: unsigned long; for data buffer out (user space)
2612 * @data_in_size: u32; for size of data
2613 * @block_size: u32; for block size
2614 * @tail_block_size: u32; for size of tail block
2615 * @isapplet: bool; to indicate external app
2616 * @is_kva: bool; kernel buffer; only used for kernel crypto module
2617 * @secure_dma: indicates whether this is secure_dma using IMR
2619 * This function prepares the linked DMA tables and puts the
2620 * address for the linked list of tables inta a DCB (data control
2621 * block) the address of which is known by the SEP hardware
2622 * Note that all bus addresses that are passed to the SEP
2623 * are in 32 bit format; the SEP is a 32 bit device
2625 int sep_prepare_input_output_dma_table_in_dcb(struct sep_device
*sep
,
2626 unsigned long app_in_address
,
2627 unsigned long app_out_address
,
2630 u32 tail_block_size
,
2634 struct sep_dcblock
*dcb_region
,
2635 void **dmatables_region
,
2636 struct sep_dma_context
**dma_ctx
,
2637 struct scatterlist
*src_sg
,
2638 struct scatterlist
*dst_sg
)
2643 /* Address of the created DCB table */
2644 struct sep_dcblock
*dcb_table_ptr
= NULL
;
2645 /* The physical address of the first input DMA table */
2646 dma_addr_t in_first_mlli_address
= 0;
2647 /* Number of entries in the first input DMA table */
2648 u32 in_first_num_entries
= 0;
2649 /* The physical address of the first output DMA table */
2650 dma_addr_t out_first_mlli_address
= 0;
2651 /* Number of entries in the first output DMA table */
2652 u32 out_first_num_entries
= 0;
2653 /* Data in the first input/output table */
2654 u32 first_data_size
= 0;
2656 dev_dbg(&sep
->pdev
->dev
, "[PID%d] app_in_address %lx\n",
2657 current
->pid
, app_in_address
);
2659 dev_dbg(&sep
->pdev
->dev
, "[PID%d] app_out_address %lx\n",
2660 current
->pid
, app_out_address
);
2662 dev_dbg(&sep
->pdev
->dev
, "[PID%d] data_in_size %x\n",
2663 current
->pid
, data_in_size
);
2665 dev_dbg(&sep
->pdev
->dev
, "[PID%d] block_size %x\n",
2666 current
->pid
, block_size
);
2668 dev_dbg(&sep
->pdev
->dev
, "[PID%d] tail_block_size %x\n",
2669 current
->pid
, tail_block_size
);
2671 dev_dbg(&sep
->pdev
->dev
, "[PID%d] isapplet %x\n",
2672 current
->pid
, isapplet
);
2674 dev_dbg(&sep
->pdev
->dev
, "[PID%d] is_kva %x\n",
2675 current
->pid
, is_kva
);
2677 dev_dbg(&sep
->pdev
->dev
, "[PID%d] src_sg %p\n",
2678 current
->pid
, src_sg
);
2680 dev_dbg(&sep
->pdev
->dev
, "[PID%d] dst_sg %p\n",
2681 current
->pid
, dst_sg
);
2684 dev_warn(&sep
->pdev
->dev
, "[PID%d] no DMA context pointer\n",
2691 /* In case there are multiple DCBs for this transaction */
2692 dev_dbg(&sep
->pdev
->dev
, "[PID%d] DMA context already set\n",
2695 *dma_ctx
= kzalloc(sizeof(**dma_ctx
), GFP_KERNEL
);
2697 dev_dbg(&sep
->pdev
->dev
,
2698 "[PID%d] Not enough memory for DMA context\n",
2703 dev_dbg(&sep
->pdev
->dev
,
2704 "[PID%d] Created DMA context addr at 0x%p\n",
2705 current
->pid
, *dma_ctx
);
2708 (*dma_ctx
)->secure_dma
= secure_dma
;
2710 /* these are for kernel crypto only */
2711 (*dma_ctx
)->src_sg
= src_sg
;
2712 (*dma_ctx
)->dst_sg
= dst_sg
;
2714 if ((*dma_ctx
)->nr_dcb_creat
== SEP_MAX_NUM_SYNC_DMA_OPS
) {
2715 /* No more DCBs to allocate */
2716 dev_dbg(&sep
->pdev
->dev
, "[PID%d] no more DCBs available\n",
2719 goto end_function_error
;
2722 /* Allocate new DCB */
2724 dcb_table_ptr
= dcb_region
;
2726 dcb_table_ptr
= (struct sep_dcblock
*)(sep
->shared_addr
+
2727 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES
+
2728 ((*dma_ctx
)->nr_dcb_creat
*
2729 sizeof(struct sep_dcblock
)));
2732 /* Set the default values in the DCB */
2733 dcb_table_ptr
->input_mlli_address
= 0;
2734 dcb_table_ptr
->input_mlli_num_entries
= 0;
2735 dcb_table_ptr
->input_mlli_data_size
= 0;
2736 dcb_table_ptr
->output_mlli_address
= 0;
2737 dcb_table_ptr
->output_mlli_num_entries
= 0;
2738 dcb_table_ptr
->output_mlli_data_size
= 0;
2739 dcb_table_ptr
->tail_data_size
= 0;
2740 dcb_table_ptr
->out_vr_tail_pt
= 0;
2742 if (isapplet
== true) {
2744 /* Check if there is enough data for DMA operation */
2745 if (data_in_size
< SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE
) {
2746 if (is_kva
== true) {
2748 goto end_function_error
;
2750 if (copy_from_user(dcb_table_ptr
->tail_data
,
2751 (void __user
*)app_in_address
,
2754 goto end_function_error
;
2758 dcb_table_ptr
->tail_data_size
= data_in_size
;
2760 /* Set the output user-space address for mem2mem op */
2761 if (app_out_address
)
2762 dcb_table_ptr
->out_vr_tail_pt
=
2763 (aligned_u64
)app_out_address
;
2766 * Update both data length parameters in order to avoid
2767 * second data copy and allow building of empty mlli
2774 if (!app_out_address
) {
2775 tail_size
= data_in_size
% block_size
;
2777 if (tail_block_size
== block_size
)
2778 tail_size
= block_size
;
2785 if (tail_size
> sizeof(dcb_table_ptr
->tail_data
))
2787 if (is_kva
== true) {
2789 goto end_function_error
;
2791 /* We have tail data - copy it to DCB */
2792 if (copy_from_user(dcb_table_ptr
->tail_data
,
2793 (void __user
*)(app_in_address
+
2794 data_in_size
- tail_size
), tail_size
)) {
2796 goto end_function_error
;
2799 if (app_out_address
)
2801 * Calculate the output address
2802 * according to tail data size
2804 dcb_table_ptr
->out_vr_tail_pt
=
2805 (aligned_u64
)app_out_address
+
2806 data_in_size
- tail_size
;
2808 /* Save the real tail data size */
2809 dcb_table_ptr
->tail_data_size
= tail_size
;
2811 * Update the data size without the tail
2812 * data size AKA data for the dma
2814 data_in_size
= (data_in_size
- tail_size
);
2817 /* Check if we need to build only input table or input/output */
2818 if (app_out_address
) {
2819 /* Prepare input/output tables */
2820 error
= sep_prepare_input_output_dma_table(sep
,
2825 &in_first_mlli_address
,
2826 &out_first_mlli_address
,
2827 &in_first_num_entries
,
2828 &out_first_num_entries
,
2834 /* Prepare input tables */
2835 error
= sep_prepare_input_dma_table(sep
,
2839 &in_first_mlli_address
,
2840 &in_first_num_entries
,
2848 dev_warn(&sep
->pdev
->dev
,
2849 "prepare DMA table call failed "
2850 "from prepare DCB call\n");
2851 goto end_function_error
;
2854 /* Set the DCB values */
2855 dcb_table_ptr
->input_mlli_address
= in_first_mlli_address
;
2856 dcb_table_ptr
->input_mlli_num_entries
= in_first_num_entries
;
2857 dcb_table_ptr
->input_mlli_data_size
= first_data_size
;
2858 dcb_table_ptr
->output_mlli_address
= out_first_mlli_address
;
2859 dcb_table_ptr
->output_mlli_num_entries
= out_first_num_entries
;
2860 dcb_table_ptr
->output_mlli_data_size
= first_data_size
;
2875 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2876 * @sep: pointer to struct sep_device
2877 * @isapplet: indicates external application (used for kernel access)
2878 * @is_kva: indicates kernel addresses (only used for kernel crypto)
2880 * This function frees the DMA tables and DCB
2882 static int sep_free_dma_tables_and_dcb(struct sep_device
*sep
, bool isapplet
,
2883 bool is_kva
, struct sep_dma_context
**dma_ctx
)
2885 struct sep_dcblock
*dcb_table_ptr
;
2886 unsigned long pt_hold
;
2893 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_free_dma_tables_and_dcb\n",
2896 if (((*dma_ctx
)->secure_dma
== false) && (isapplet
== true)) {
2897 dev_dbg(&sep
->pdev
->dev
, "[PID%d] handling applet\n",
2900 /* Tail stuff is only for non secure_dma */
2901 /* Set pointer to first DCB table */
2902 dcb_table_ptr
= (struct sep_dcblock
*)
2904 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES
);
2907 * Go over each DCB and see if
2908 * tail pointer must be updated
2910 for (i
= 0; dma_ctx
&& *dma_ctx
&&
2911 i
< (*dma_ctx
)->nr_dcb_creat
; i
++, dcb_table_ptr
++) {
2912 if (dcb_table_ptr
->out_vr_tail_pt
) {
2913 pt_hold
= (unsigned long)dcb_table_ptr
->
2915 tail_pt
= (void *)pt_hold
;
2916 if (is_kva
== true) {
2920 error_temp
= copy_to_user(
2921 (void __user
*)tail_pt
,
2922 dcb_table_ptr
->tail_data
,
2923 dcb_table_ptr
->tail_data_size
);
2926 /* Release the DMA resource */
2934 /* Free the output pages, if any */
2935 sep_free_dma_table_data_handler(sep
, dma_ctx
);
2937 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_free_dma_tables_and_dcb end\n",
2944 * sep_prepare_dcb_handler - prepare a control block
2945 * @sep: pointer to struct sep_device
2946 * @arg: pointer to user parameters
2947 * @secure_dma: indicate whether we are using secure_dma on IMR
2949 * This function will retrieve the RAR buffer physical addresses, type
2950 * & size corresponding to the RAR handles provided in the buffers vector.
2952 static int sep_prepare_dcb_handler(struct sep_device
*sep
, unsigned long arg
,
2954 struct sep_dma_context
**dma_ctx
)
2957 /* Command arguments */
2958 static struct build_dcb_struct command_args
;
2960 /* Get the command arguments */
2961 if (copy_from_user(&command_args
, (void __user
*)arg
,
2962 sizeof(struct build_dcb_struct
))) {
2967 dev_dbg(&sep
->pdev
->dev
,
2968 "[PID%d] prep dcb handler app_in_address is %08llx\n",
2969 current
->pid
, command_args
.app_in_address
);
2970 dev_dbg(&sep
->pdev
->dev
,
2971 "[PID%d] app_out_address is %08llx\n",
2972 current
->pid
, command_args
.app_out_address
);
2973 dev_dbg(&sep
->pdev
->dev
,
2974 "[PID%d] data_size is %x\n",
2975 current
->pid
, command_args
.data_in_size
);
2976 dev_dbg(&sep
->pdev
->dev
,
2977 "[PID%d] block_size is %x\n",
2978 current
->pid
, command_args
.block_size
);
2979 dev_dbg(&sep
->pdev
->dev
,
2980 "[PID%d] tail block_size is %x\n",
2981 current
->pid
, command_args
.tail_block_size
);
2982 dev_dbg(&sep
->pdev
->dev
,
2983 "[PID%d] is_applet is %x\n",
2984 current
->pid
, command_args
.is_applet
);
2986 if (!command_args
.app_in_address
) {
2987 dev_warn(&sep
->pdev
->dev
,
2988 "[PID%d] null app_in_address\n", current
->pid
);
2993 error
= sep_prepare_input_output_dma_table_in_dcb(sep
,
2994 (unsigned long)command_args
.app_in_address
,
2995 (unsigned long)command_args
.app_out_address
,
2996 command_args
.data_in_size
, command_args
.block_size
,
2997 command_args
.tail_block_size
,
2998 command_args
.is_applet
, false,
2999 secure_dma
, NULL
, NULL
, dma_ctx
, NULL
, NULL
);
3007 * sep_free_dcb_handler - free control block resources
3008 * @sep: pointer to struct sep_device
3010 * This function frees the DCB resources and updates the needed
3011 * user-space buffers.
3013 static int sep_free_dcb_handler(struct sep_device
*sep
,
3014 struct sep_dma_context
**dma_ctx
)
3016 if (!dma_ctx
|| !(*dma_ctx
)) {
3017 dev_dbg(&sep
->pdev
->dev
,
3018 "[PID%d] no dma context defined, nothing to free\n",
3023 dev_dbg(&sep
->pdev
->dev
, "[PID%d] free dcbs num of DCBs %x\n",
3025 (*dma_ctx
)->nr_dcb_creat
);
3027 return sep_free_dma_tables_and_dcb(sep
, false, false, dma_ctx
);
3031 * sep_ioctl - ioctl handler for sep device
3032 * @filp: pointer to struct file
3034 * @arg: pointer to argument structure
3036 * Implement the ioctl methods available on the SEP device.
3038 static long sep_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
3040 struct sep_private_data
* const private_data
= filp
->private_data
;
3041 struct sep_call_status
*call_status
= &private_data
->call_status
;
3042 struct sep_device
*sep
= private_data
->device
;
3043 struct sep_dma_context
**dma_ctx
= &private_data
->dma_ctx
;
3044 struct sep_queue_info
**my_queue_elem
= &private_data
->my_queue_elem
;
3047 dev_dbg(&sep
->pdev
->dev
, "[PID%d] ioctl cmd 0x%x\n",
3049 dev_dbg(&sep
->pdev
->dev
, "[PID%d] dma context addr 0x%p\n",
3050 current
->pid
, *dma_ctx
);
3052 /* Make sure we own this device */
3053 error
= sep_check_transaction_owner(sep
);
3055 dev_dbg(&sep
->pdev
->dev
, "[PID%d] ioctl pid is not owner\n",
3060 /* Check that sep_mmap has been called before */
3061 if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET
,
3062 &call_status
->status
)) {
3063 dev_dbg(&sep
->pdev
->dev
,
3064 "[PID%d] mmap not called\n", current
->pid
);
3069 /* Check that the command is for SEP device */
3070 if (_IOC_TYPE(cmd
) != SEP_IOC_MAGIC_NUMBER
) {
3076 case SEP_IOCSENDSEPCOMMAND
:
3077 dev_dbg(&sep
->pdev
->dev
,
3078 "[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
3080 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET
,
3081 &call_status
->status
)) {
3082 dev_warn(&sep
->pdev
->dev
,
3083 "[PID%d] send msg already done\n",
3088 /* Send command to SEP */
3089 error
= sep_send_command_handler(sep
);
3091 set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET
,
3092 &call_status
->status
);
3093 dev_dbg(&sep
->pdev
->dev
,
3094 "[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
3097 case SEP_IOCENDTRANSACTION
:
3098 dev_dbg(&sep
->pdev
->dev
,
3099 "[PID%d] SEP_IOCENDTRANSACTION start\n",
3101 error
= sep_end_transaction_handler(sep
, dma_ctx
, call_status
,
3103 dev_dbg(&sep
->pdev
->dev
,
3104 "[PID%d] SEP_IOCENDTRANSACTION end\n",
3107 case SEP_IOCPREPAREDCB
:
3108 dev_dbg(&sep
->pdev
->dev
,
3109 "[PID%d] SEP_IOCPREPAREDCB start\n",
3111 case SEP_IOCPREPAREDCB_SECURE_DMA
:
3112 dev_dbg(&sep
->pdev
->dev
,
3113 "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
3115 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET
,
3116 &call_status
->status
)) {
3117 dev_dbg(&sep
->pdev
->dev
,
3118 "[PID%d] dcb prep needed before send msg\n",
3125 dev_dbg(&sep
->pdev
->dev
,
3126 "[PID%d] dcb null arg\n", current
->pid
);
3131 if (cmd
== SEP_IOCPREPAREDCB
) {
3133 dev_dbg(&sep
->pdev
->dev
,
3134 "[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
3137 error
= sep_prepare_dcb_handler(sep
, arg
, false,
3141 dev_dbg(&sep
->pdev
->dev
,
3142 "[PID%d] SEP_IOC_POC (with secure_dma)\n",
3145 error
= sep_prepare_dcb_handler(sep
, arg
, true,
3148 dev_dbg(&sep
->pdev
->dev
, "[PID%d] dcb's end\n",
3151 case SEP_IOCFREEDCB
:
3152 dev_dbg(&sep
->pdev
->dev
, "[PID%d] SEP_IOCFREEDCB start\n",
3154 case SEP_IOCFREEDCB_SECURE_DMA
:
3155 dev_dbg(&sep
->pdev
->dev
,
3156 "[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
3158 error
= sep_free_dcb_handler(sep
, dma_ctx
);
3159 dev_dbg(&sep
->pdev
->dev
, "[PID%d] SEP_IOCFREEDCB end\n",
3164 dev_dbg(&sep
->pdev
->dev
, "[PID%d] default end\n",
3170 dev_dbg(&sep
->pdev
->dev
, "[PID%d] ioctl end\n", current
->pid
);
3176 * sep_inthandler - interrupt handler for sep device
3178 * @dev_id: device id
3180 static irqreturn_t
sep_inthandler(int irq
, void *dev_id
)
3182 unsigned long lock_irq_flag
;
3183 u32 reg_val
, reg_val2
= 0;
3184 struct sep_device
*sep
= dev_id
;
3185 irqreturn_t int_error
= IRQ_HANDLED
;
3187 /* Are we in power save? */
3188 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
3189 if (sep
->pdev
->dev
.power
.runtime_status
!= RPM_ACTIVE
) {
3190 dev_dbg(&sep
->pdev
->dev
, "interrupt during pwr save\n");
3195 if (test_bit(SEP_WORKING_LOCK_BIT
, &sep
->in_use_flags
) == 0) {
3196 dev_dbg(&sep
->pdev
->dev
, "interrupt while nobody using sep\n");
3200 /* Read the IRR register to check if this is SEP interrupt */
3201 reg_val
= sep_read_reg(sep
, HW_HOST_IRR_REG_ADDR
);
3203 dev_dbg(&sep
->pdev
->dev
, "sep int: IRR REG val: %x\n", reg_val
);
3205 if (reg_val
& (0x1 << 13)) {
3207 /* Lock and update the counter of reply messages */
3208 spin_lock_irqsave(&sep
->snd_rply_lck
, lock_irq_flag
);
3210 spin_unlock_irqrestore(&sep
->snd_rply_lck
, lock_irq_flag
);
3212 dev_dbg(&sep
->pdev
->dev
, "sep int: send_ct %lx reply_ct %lx\n",
3213 sep
->send_ct
, sep
->reply_ct
);
3215 /* Is this a kernel client request */
3216 if (sep
->in_kernel
) {
3217 tasklet_schedule(&sep
->finish_tasklet
);
3218 goto finished_interrupt
;
3221 /* Is this printf or daemon request? */
3222 reg_val2
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR2_REG_ADDR
);
3223 dev_dbg(&sep
->pdev
->dev
,
3224 "SEP Interrupt - GPR2 is %08x\n", reg_val2
);
3226 clear_bit(SEP_WORKING_LOCK_BIT
, &sep
->in_use_flags
);
3228 if ((reg_val2
>> 30) & 0x1) {
3229 dev_dbg(&sep
->pdev
->dev
, "int: printf request\n");
3230 } else if (reg_val2
>> 31) {
3231 dev_dbg(&sep
->pdev
->dev
, "int: daemon request\n");
3233 dev_dbg(&sep
->pdev
->dev
, "int: SEP reply\n");
3234 wake_up(&sep
->event_interrupt
);
3237 dev_dbg(&sep
->pdev
->dev
, "int: not SEP interrupt\n");
3238 int_error
= IRQ_NONE
;
3243 if (int_error
== IRQ_HANDLED
)
3244 sep_write_reg(sep
, HW_HOST_ICR_REG_ADDR
, reg_val
);
3250 * sep_reconfig_shared_area - reconfigure shared area
3251 * @sep: pointer to struct sep_device
3253 * Reconfig the shared area between HOST and SEP - needed in case
3254 * the DX_CC_Init function was called before OS loading.
3256 static int sep_reconfig_shared_area(struct sep_device
*sep
)
3260 /* use to limit waiting for SEP */
3261 unsigned long end_time
;
3263 /* Send the new SHARED MESSAGE AREA to the SEP */
3264 dev_dbg(&sep
->pdev
->dev
, "reconfig shared; sending %08llx to sep\n",
3265 (unsigned long long)sep
->shared_bus
);
3267 sep_write_reg(sep
, HW_HOST_HOST_SEP_GPR1_REG_ADDR
, sep
->shared_bus
);
3269 /* Poll for SEP response */
3270 ret_val
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR1_REG_ADDR
);
3272 end_time
= jiffies
+ (WAIT_TIME
* HZ
);
3274 while ((time_before(jiffies
, end_time
)) && (ret_val
!= 0xffffffff) &&
3275 (ret_val
!= sep
->shared_bus
))
3276 ret_val
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR1_REG_ADDR
);
3278 /* Check the return value (register) */
3279 if (ret_val
!= sep
->shared_bus
) {
3280 dev_warn(&sep
->pdev
->dev
, "could not reconfig shared area\n");
3281 dev_warn(&sep
->pdev
->dev
, "result was %x\n", ret_val
);
3286 dev_dbg(&sep
->pdev
->dev
, "reconfig shared area end\n");
3292 * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
3295 * @dcb_region: DCB region copy
3296 * @dmatables_region: MLLI/DMA tables copy
3297 * @dma_ctx: DMA context for current transaction
3299 ssize_t
sep_activate_dcb_dmatables_context(struct sep_device
*sep
,
3300 struct sep_dcblock
**dcb_region
,
3301 void **dmatables_region
,
3302 struct sep_dma_context
*dma_ctx
)
3304 void *dmaregion_free_start
= NULL
;
3305 void *dmaregion_free_end
= NULL
;
3306 void *dcbregion_free_start
= NULL
;
3307 void *dcbregion_free_end
= NULL
;
3310 dev_dbg(&sep
->pdev
->dev
, "[PID%d] activating dcb/dma region\n",
3313 if (1 > dma_ctx
->nr_dcb_creat
) {
3314 dev_warn(&sep
->pdev
->dev
,
3315 "[PID%d] invalid number of dcbs to activate 0x%08X\n",
3316 current
->pid
, dma_ctx
->nr_dcb_creat
);
3321 dmaregion_free_start
= sep
->shared_addr
3322 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
;
3323 dmaregion_free_end
= dmaregion_free_start
3324 + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
- 1;
3326 if (dmaregion_free_start
3327 + dma_ctx
->dmatables_len
> dmaregion_free_end
) {
3331 memcpy(dmaregion_free_start
,
3333 dma_ctx
->dmatables_len
);
3334 /* Free MLLI table copy */
3335 kfree(*dmatables_region
);
3336 *dmatables_region
= NULL
;
3338 /* Copy thread's DCB table copy to DCB table region */
3339 dcbregion_free_start
= sep
->shared_addr
+
3340 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES
;
3341 dcbregion_free_end
= dcbregion_free_start
+
3342 (SEP_MAX_NUM_SYNC_DMA_OPS
*
3343 sizeof(struct sep_dcblock
)) - 1;
3345 if (dcbregion_free_start
3346 + (dma_ctx
->nr_dcb_creat
* sizeof(struct sep_dcblock
))
3347 > dcbregion_free_end
) {
3352 memcpy(dcbregion_free_start
,
3354 dma_ctx
->nr_dcb_creat
* sizeof(struct sep_dcblock
));
3356 /* Print the tables */
3357 dev_dbg(&sep
->pdev
->dev
, "activate: input table\n");
3358 sep_debug_print_lli_tables(sep
,
3359 (struct sep_lli_entry
*)sep_shared_area_bus_to_virt(sep
,
3360 (*dcb_region
)->input_mlli_address
),
3361 (*dcb_region
)->input_mlli_num_entries
,
3362 (*dcb_region
)->input_mlli_data_size
);
3364 dev_dbg(&sep
->pdev
->dev
, "activate: output table\n");
3365 sep_debug_print_lli_tables(sep
,
3366 (struct sep_lli_entry
*)sep_shared_area_bus_to_virt(sep
,
3367 (*dcb_region
)->output_mlli_address
),
3368 (*dcb_region
)->output_mlli_num_entries
,
3369 (*dcb_region
)->output_mlli_data_size
);
3371 dev_dbg(&sep
->pdev
->dev
,
3372 "[PID%d] printing activated tables\n", current
->pid
);
3375 kfree(*dmatables_region
);
3376 *dmatables_region
= NULL
;
3385 * sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
3387 * @dcb_region: DCB region buf to create for current transaction
3388 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
3389 * @dma_ctx: DMA context buf to create for current transaction
3390 * @user_dcb_args: User arguments for DCB/MLLI creation
3391 * @num_dcbs: Number of DCBs to create
3392 * @secure_dma: Indicate use of IMR restricted memory secure dma
3394 static ssize_t
sep_create_dcb_dmatables_context(struct sep_device
*sep
,
3395 struct sep_dcblock
**dcb_region
,
3396 void **dmatables_region
,
3397 struct sep_dma_context
**dma_ctx
,
3398 const struct build_dcb_struct __user
*user_dcb_args
,
3399 const u32 num_dcbs
, bool secure_dma
)
3403 struct build_dcb_struct
*dcb_args
= NULL
;
3405 dev_dbg(&sep
->pdev
->dev
, "[PID%d] creating dcb/dma region\n",
3408 if (!dcb_region
|| !dma_ctx
|| !dmatables_region
|| !user_dcb_args
) {
3413 if (SEP_MAX_NUM_SYNC_DMA_OPS
< num_dcbs
) {
3414 dev_warn(&sep
->pdev
->dev
,
3415 "[PID%d] invalid number of dcbs 0x%08X\n",
3416 current
->pid
, num_dcbs
);
3421 dcb_args
= kzalloc(num_dcbs
* sizeof(struct build_dcb_struct
),
3424 dev_warn(&sep
->pdev
->dev
, "[PID%d] no memory for dcb args\n",
3430 if (copy_from_user(dcb_args
,
3432 num_dcbs
* sizeof(struct build_dcb_struct
))) {
3437 /* Allocate thread-specific memory for DCB */
3438 *dcb_region
= kzalloc(num_dcbs
* sizeof(struct sep_dcblock
),
3440 if (!(*dcb_region
)) {
3445 /* Prepare DCB and MLLI table into the allocated regions */
3446 for (i
= 0; i
< num_dcbs
; i
++) {
3447 error
= sep_prepare_input_output_dma_table_in_dcb(sep
,
3448 (unsigned long)dcb_args
[i
].app_in_address
,
3449 (unsigned long)dcb_args
[i
].app_out_address
,
3450 dcb_args
[i
].data_in_size
,
3451 dcb_args
[i
].block_size
,
3452 dcb_args
[i
].tail_block_size
,
3453 dcb_args
[i
].is_applet
,
3455 *dcb_region
, dmatables_region
,
3460 dev_warn(&sep
->pdev
->dev
,
3461 "[PID%d] dma table creation failed\n",
3466 if (dcb_args
[i
].app_in_address
!= 0)
3467 (*dma_ctx
)->input_data_len
+= dcb_args
[i
].data_in_size
;
3477 * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
3480 * @dcb_region: DCB region buf to create for current transaction
3481 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
3482 * @dma_ctx: DMA context buf to create for current transaction
3483 * @user_dcb_args: User arguments for DCB/MLLI creation
3484 * @num_dcbs: Number of DCBs to create
3485 * This does that same thing as sep_create_dcb_dmatables_context
3486 * except that it is used only for the kernel crypto operation. It is
3487 * separate because there is no user data involved; the dcb data structure
3488 * is specific for kernel crypto (build_dcb_struct_kernel)
3490 int sep_create_dcb_dmatables_context_kernel(struct sep_device
*sep
,
3491 struct sep_dcblock
**dcb_region
,
3492 void **dmatables_region
,
3493 struct sep_dma_context
**dma_ctx
,
3494 const struct build_dcb_struct_kernel
*dcb_data
,
3500 dev_dbg(&sep
->pdev
->dev
, "[PID%d] creating dcb/dma region\n",
3503 if (!dcb_region
|| !dma_ctx
|| !dmatables_region
|| !dcb_data
) {
3508 if (SEP_MAX_NUM_SYNC_DMA_OPS
< num_dcbs
) {
3509 dev_warn(&sep
->pdev
->dev
,
3510 "[PID%d] invalid number of dcbs 0x%08X\n",
3511 current
->pid
, num_dcbs
);
3516 dev_dbg(&sep
->pdev
->dev
, "[PID%d] num_dcbs is %d\n",
3517 current
->pid
, num_dcbs
);
3519 /* Allocate thread-specific memory for DCB */
3520 *dcb_region
= kzalloc(num_dcbs
* sizeof(struct sep_dcblock
),
3522 if (!(*dcb_region
)) {
3527 /* Prepare DCB and MLLI table into the allocated regions */
3528 for (i
= 0; i
< num_dcbs
; i
++) {
3529 error
= sep_prepare_input_output_dma_table_in_dcb(sep
,
3530 (unsigned long)dcb_data
->app_in_address
,
3531 (unsigned long)dcb_data
->app_out_address
,
3532 dcb_data
->data_in_size
,
3533 dcb_data
->block_size
,
3534 dcb_data
->tail_block_size
,
3535 dcb_data
->is_applet
,
3538 *dcb_region
, dmatables_region
,
3543 dev_warn(&sep
->pdev
->dev
,
3544 "[PID%d] dma table creation failed\n",
3556 * sep_activate_msgarea_context - Takes the message area context into use
3558 * @msg_region: Message area context buf
3559 * @msg_len: Message area context buffer size
3561 static ssize_t
sep_activate_msgarea_context(struct sep_device
*sep
,
3563 const size_t msg_len
)
3565 dev_dbg(&sep
->pdev
->dev
, "[PID%d] activating msg region\n",
3568 if (!msg_region
|| !(*msg_region
) ||
3569 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES
< msg_len
) {
3570 dev_warn(&sep
->pdev
->dev
,
3571 "[PID%d] invalid act msgarea len 0x%08zX\n",
3572 current
->pid
, msg_len
);
3576 memcpy(sep
->shared_addr
, *msg_region
, msg_len
);
3582 * sep_create_msgarea_context - Creates message area context
3584 * @msg_region: Msg area region buf to create for current transaction
3585 * @msg_user: Content for msg area region from user
3586 * @msg_len: Message area size
3588 static ssize_t
sep_create_msgarea_context(struct sep_device
*sep
,
3590 const void __user
*msg_user
,
3591 const size_t msg_len
)
3595 dev_dbg(&sep
->pdev
->dev
, "[PID%d] creating msg region\n",
3600 SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES
< msg_len
||
3601 SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES
> msg_len
) {
3602 dev_warn(&sep
->pdev
->dev
,
3603 "[PID%d] invalid creat msgarea len 0x%08zX\n",
3604 current
->pid
, msg_len
);
3609 /* Allocate thread-specific memory for message buffer */
3610 *msg_region
= kzalloc(msg_len
, GFP_KERNEL
);
3611 if (!(*msg_region
)) {
3612 dev_warn(&sep
->pdev
->dev
,
3613 "[PID%d] no mem for msgarea context\n",
3619 /* Copy input data to write() to allocated message buffer */
3620 if (copy_from_user(*msg_region
, msg_user
, msg_len
)) {
3626 if (error
&& msg_region
) {
3636 * sep_read - Returns results of an operation for fastcall interface
3637 * @filp: File pointer
3638 * @buf_user: User buffer for storing results
3639 * @count_user: User buffer size
3640 * @offset: File offset, not supported
3642 * The implementation does not support reading in chunks, all data must be
3643 * consumed during a single read system call.
3645 static ssize_t
sep_read(struct file
*filp
,
3646 char __user
*buf_user
, size_t count_user
,
3649 struct sep_private_data
* const private_data
= filp
->private_data
;
3650 struct sep_call_status
*call_status
= &private_data
->call_status
;
3651 struct sep_device
*sep
= private_data
->device
;
3652 struct sep_dma_context
**dma_ctx
= &private_data
->dma_ctx
;
3653 struct sep_queue_info
**my_queue_elem
= &private_data
->my_queue_elem
;
3654 ssize_t error
= 0, error_tmp
= 0;
3656 /* Am I the process that owns the transaction? */
3657 error
= sep_check_transaction_owner(sep
);
3659 dev_dbg(&sep
->pdev
->dev
, "[PID%d] read pid is not owner\n",
3664 /* Checks that user has called necessarry apis */
3665 if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET
,
3666 &call_status
->status
)) {
3667 dev_warn(&sep
->pdev
->dev
,
3668 "[PID%d] fastcall write not called\n",
3671 goto end_function_error
;
3675 dev_warn(&sep
->pdev
->dev
,
3676 "[PID%d] null user buffer\n",
3679 goto end_function_error
;
3683 /* Wait for SEP to finish */
3684 wait_event(sep
->event_interrupt
,
3685 test_bit(SEP_WORKING_LOCK_BIT
,
3686 &sep
->in_use_flags
) == 0);
3688 sep_dump_message(sep
);
3690 dev_dbg(&sep
->pdev
->dev
, "[PID%d] count_user = 0x%08zX\n",
3691 current
->pid
, count_user
);
3693 /* In case user has allocated bigger buffer */
3694 if (count_user
> SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES
)
3695 count_user
= SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES
;
3697 if (copy_to_user(buf_user
, sep
->shared_addr
, count_user
)) {
3699 goto end_function_error
;
3702 dev_dbg(&sep
->pdev
->dev
, "[PID%d] read succeeded\n", current
->pid
);
3706 /* Copy possible tail data to user and free DCB and MLLIs */
3707 error_tmp
= sep_free_dcb_handler(sep
, dma_ctx
);
3709 dev_warn(&sep
->pdev
->dev
, "[PID%d] dcb free failed\n",
3712 /* End the transaction, wakeup pending ones */
3713 error_tmp
= sep_end_transaction_handler(sep
, dma_ctx
, call_status
,
3716 dev_warn(&sep
->pdev
->dev
,
3717 "[PID%d] ending transaction failed\n",
3725 * sep_fastcall_args_get - Gets fastcall params from user
3727 * @args: Parameters buffer
3728 * @buf_user: User buffer for operation parameters
3729 * @count_user: User buffer size
3731 static inline ssize_t
sep_fastcall_args_get(struct sep_device
*sep
,
3732 struct sep_fastcall_hdr
*args
,
3733 const char __user
*buf_user
,
3734 const size_t count_user
)
3737 size_t actual_count
= 0;
3740 dev_warn(&sep
->pdev
->dev
,
3741 "[PID%d] null user buffer\n",
3747 if (count_user
< sizeof(struct sep_fastcall_hdr
)) {
3748 dev_warn(&sep
->pdev
->dev
,
3749 "[PID%d] too small message size 0x%08zX\n",
3750 current
->pid
, count_user
);
3756 if (copy_from_user(args
, buf_user
, sizeof(struct sep_fastcall_hdr
))) {
3761 if (SEP_FC_MAGIC
!= args
->magic
) {
3762 dev_warn(&sep
->pdev
->dev
,
3763 "[PID%d] invalid fastcall magic 0x%08X\n",
3764 current
->pid
, args
->magic
);
3769 dev_dbg(&sep
->pdev
->dev
, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
3770 current
->pid
, args
->num_dcbs
);
3771 dev_dbg(&sep
->pdev
->dev
, "[PID%d] fastcall hdr msg len 0x%08X\n",
3772 current
->pid
, args
->msg_len
);
3774 if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES
< args
->msg_len
||
3775 SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES
> args
->msg_len
) {
3776 dev_warn(&sep
->pdev
->dev
,
3777 "[PID%d] invalid message length\n",
3783 actual_count
= sizeof(struct sep_fastcall_hdr
)
3785 + (args
->num_dcbs
* sizeof(struct build_dcb_struct
));
3787 if (actual_count
!= count_user
) {
3788 dev_warn(&sep
->pdev
->dev
,
3789 "[PID%d] inconsistent message "
3790 "sizes 0x%08zX vs 0x%08zX\n",
3791 current
->pid
, actual_count
, count_user
);
3801 * sep_write - Starts an operation for fastcall interface
3802 * @filp: File pointer
3803 * @buf_user: User buffer for operation parameters
3804 * @count_user: User buffer size
3805 * @offset: File offset, not supported
3807 * The implementation does not support writing in chunks,
3808 * all data must be given during a single write system call.
3810 static ssize_t
sep_write(struct file
*filp
,
3811 const char __user
*buf_user
, size_t count_user
,
3814 struct sep_private_data
* const private_data
= filp
->private_data
;
3815 struct sep_call_status
*call_status
= &private_data
->call_status
;
3816 struct sep_device
*sep
= private_data
->device
;
3817 struct sep_dma_context
*dma_ctx
= NULL
;
3818 struct sep_fastcall_hdr call_hdr
= {0};
3819 void *msg_region
= NULL
;
3820 void *dmatables_region
= NULL
;
3821 struct sep_dcblock
*dcb_region
= NULL
;
3823 struct sep_queue_info
*my_queue_elem
= NULL
;
3824 bool my_secure_dma
; /* are we using secure_dma (IMR)? */
3826 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep dev is 0x%p\n",
3828 dev_dbg(&sep
->pdev
->dev
, "[PID%d] private_data is 0x%p\n",
3829 current
->pid
, private_data
);
3831 error
= sep_fastcall_args_get(sep
, &call_hdr
, buf_user
, count_user
);
3835 buf_user
+= sizeof(struct sep_fastcall_hdr
);
3837 if (call_hdr
.secure_dma
== 0)
3838 my_secure_dma
= false;
3840 my_secure_dma
= true;
3843 * Controlling driver memory usage by limiting amount of
3844 * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT number
3845 * of threads can progress further at a time
3847 dev_dbg(&sep
->pdev
->dev
, "[PID%d] waiting for double buffering "
3848 "region access\n", current
->pid
);
3849 error
= down_interruptible(&sep
->sep_doublebuf
);
3850 dev_dbg(&sep
->pdev
->dev
, "[PID%d] double buffering region start\n",
3853 /* Signal received */
3854 goto end_function_error
;
3859 * Prepare contents of the shared area regions for
3860 * the operation into temporary buffers
3862 if (0 < call_hdr
.num_dcbs
) {
3863 error
= sep_create_dcb_dmatables_context(sep
,
3867 (const struct build_dcb_struct __user
*)
3869 call_hdr
.num_dcbs
, my_secure_dma
);
3871 goto end_function_error_doublebuf
;
3873 buf_user
+= call_hdr
.num_dcbs
* sizeof(struct build_dcb_struct
);
3876 error
= sep_create_msgarea_context(sep
,
3881 goto end_function_error_doublebuf
;
3883 dev_dbg(&sep
->pdev
->dev
, "[PID%d] updating queue status\n",
3885 my_queue_elem
= sep_queue_status_add(sep
,
3886 ((struct sep_msgarea_hdr
*)msg_region
)->opcode
,
3887 (dma_ctx
) ? dma_ctx
->input_data_len
: 0,
3889 current
->comm
, sizeof(current
->comm
));
3891 if (!my_queue_elem
) {
3892 dev_dbg(&sep
->pdev
->dev
, "[PID%d] updating queue"
3893 "status error\n", current
->pid
);
3895 goto end_function_error_doublebuf
;
3898 /* Wait until current process gets the transaction */
3899 error
= sep_wait_transaction(sep
);
3902 /* Interrupted by signal, don't clear transaction */
3903 dev_dbg(&sep
->pdev
->dev
, "[PID%d] interrupted by signal\n",
3905 sep_queue_status_remove(sep
, &my_queue_elem
);
3906 goto end_function_error_doublebuf
;
3909 dev_dbg(&sep
->pdev
->dev
, "[PID%d] saving queue element\n",
3911 private_data
->my_queue_elem
= my_queue_elem
;
3913 /* Activate shared area regions for the transaction */
3914 error
= sep_activate_msgarea_context(sep
, &msg_region
,
3917 goto end_function_error_clear_transact
;
3919 sep_dump_message(sep
);
3921 if (0 < call_hdr
.num_dcbs
) {
3922 error
= sep_activate_dcb_dmatables_context(sep
,
3927 goto end_function_error_clear_transact
;
3930 /* Send command to SEP */
3931 error
= sep_send_command_handler(sep
);
3933 goto end_function_error_clear_transact
;
3935 /* Store DMA context for the transaction */
3936 private_data
->dma_ctx
= dma_ctx
;
3937 /* Update call status */
3938 set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET
, &call_status
->status
);
3941 up(&sep
->sep_doublebuf
);
3942 dev_dbg(&sep
->pdev
->dev
, "[PID%d] double buffering region end\n",
3947 end_function_error_clear_transact
:
3948 sep_end_transaction_handler(sep
, &dma_ctx
, call_status
,
3949 &private_data
->my_queue_elem
);
3951 end_function_error_doublebuf
:
3952 up(&sep
->sep_doublebuf
);
3953 dev_dbg(&sep
->pdev
->dev
, "[PID%d] double buffering region end\n",
3958 sep_free_dma_table_data_handler(sep
, &dma_ctx
);
3962 kfree(dmatables_region
);
3968 * sep_seek - Handler for seek system call
3969 * @filp: File pointer
3970 * @offset: File offset
3971 * @origin: Options for offset
3973 * Fastcall interface does not support seeking, all reads
3974 * and writes are from/to offset zero
3976 static loff_t
sep_seek(struct file
*filp
, loff_t offset
, int origin
)
3984 * sep_file_operations - file operation on sep device
3985 * @sep_ioctl: ioctl handler from user space call
3986 * @sep_poll: poll handler
3987 * @sep_open: handles sep device open request
3988 * @sep_release:handles sep device release request
3989 * @sep_mmap: handles memory mapping requests
3990 * @sep_read: handles read request on sep device
3991 * @sep_write: handles write request on sep device
3992 * @sep_seek: handles seek request on sep device
3994 static const struct file_operations sep_file_operations
= {
3995 .owner
= THIS_MODULE
,
3996 .unlocked_ioctl
= sep_ioctl
,
3999 .release
= sep_release
,
4007 * sep_sysfs_read - read sysfs entry per gives arguments
4008 * @filp: file pointer
4009 * @kobj: kobject pointer
4010 * @attr: binary file attributes
4011 * @buf: read to this buffer
4012 * @pos: offset to read
4013 * @count: amount of data to read
4015 * This function is to read sysfs entries for sep driver per given arguments.
4018 sep_sysfs_read(struct file
*filp
, struct kobject
*kobj
,
4019 struct bin_attribute
*attr
,
4020 char *buf
, loff_t pos
, size_t count
)
4022 unsigned long lck_flags
;
4023 size_t nleft
= count
;
4024 struct sep_device
*sep
= sep_dev
;
4025 struct sep_queue_info
*queue_elem
= NULL
;
4029 spin_lock_irqsave(&sep
->sep_queue_lock
, lck_flags
);
4031 queue_num
= sep
->sep_queue_num
;
4032 if (queue_num
> SEP_DOUBLEBUF_USERS_LIMIT
)
4033 queue_num
= SEP_DOUBLEBUF_USERS_LIMIT
;
4036 if (count
< sizeof(queue_num
)
4037 + (queue_num
* sizeof(struct sep_queue_data
))) {
4038 spin_unlock_irqrestore(&sep
->sep_queue_lock
, lck_flags
);
4042 memcpy(buf
, &queue_num
, sizeof(queue_num
));
4043 buf
+= sizeof(queue_num
);
4044 nleft
-= sizeof(queue_num
);
4046 list_for_each_entry(queue_elem
, &sep
->sep_queue_status
, list
) {
4047 if (i
++ > queue_num
)
4050 memcpy(buf
, &queue_elem
->data
, sizeof(queue_elem
->data
));
4051 nleft
-= sizeof(queue_elem
->data
);
4052 buf
+= sizeof(queue_elem
->data
);
4054 spin_unlock_irqrestore(&sep
->sep_queue_lock
, lck_flags
);
4056 return count
- nleft
;
4060 * bin_attributes - defines attributes for queue_status
4061 * @attr: attributes (name & permissions)
4062 * @read: function pointer to read this file
4063 * @size: maxinum size of binary attribute
4065 static const struct bin_attribute queue_status
= {
4066 .attr
= {.name
= "queue_status", .mode
= 0444},
4067 .read
= sep_sysfs_read
,
4069 + (SEP_DOUBLEBUF_USERS_LIMIT
* sizeof(struct sep_queue_data
)),
4073 * sep_register_driver_with_fs - register misc devices
4074 * @sep: pointer to struct sep_device
4076 * This function registers the driver with the file system
4078 static int sep_register_driver_with_fs(struct sep_device
*sep
)
4082 sep
->miscdev_sep
.minor
= MISC_DYNAMIC_MINOR
;
4083 sep
->miscdev_sep
.name
= SEP_DEV_NAME
;
4084 sep
->miscdev_sep
.fops
= &sep_file_operations
;
4086 ret_val
= misc_register(&sep
->miscdev_sep
);
4088 dev_warn(&sep
->pdev
->dev
, "misc reg fails for SEP %x\n",
4093 ret_val
= device_create_bin_file(sep
->miscdev_sep
.this_device
,
4096 dev_warn(&sep
->pdev
->dev
, "sysfs attribute1 fails for SEP %x\n",
4106 *sep_probe - probe a matching PCI device
4108 *@ent: pci_device_id
4110 *Attempt to set up and configure a SEP device that has been
4111 *discovered by the PCI layer. Allocates all required resources.
4113 static int __devinit
sep_probe(struct pci_dev
*pdev
,
4114 const struct pci_device_id
*ent
)
4117 struct sep_device
*sep
= NULL
;
4119 if (sep_dev
!= NULL
) {
4120 dev_dbg(&pdev
->dev
, "only one SEP supported.\n");
4124 /* Enable the device */
4125 error
= pci_enable_device(pdev
);
4127 dev_warn(&pdev
->dev
, "error enabling pci device\n");
4131 /* Allocate the sep_device structure for this device */
4132 sep_dev
= kzalloc(sizeof(struct sep_device
), GFP_ATOMIC
);
4133 if (sep_dev
== NULL
) {
4134 dev_warn(&pdev
->dev
,
4135 "can't kmalloc the sep_device structure\n");
4137 goto end_function_disable_device
;
4141 * We're going to use another variable for actually
4142 * working with the device; this way, if we have
4143 * multiple devices in the future, it would be easier
4144 * to make appropriate changes
4148 sep
->pdev
= pci_dev_get(pdev
);
4150 init_waitqueue_head(&sep
->event_transactions
);
4151 init_waitqueue_head(&sep
->event_interrupt
);
4152 spin_lock_init(&sep
->snd_rply_lck
);
4153 spin_lock_init(&sep
->sep_queue_lock
);
4154 sema_init(&sep
->sep_doublebuf
, SEP_DOUBLEBUF_USERS_LIMIT
);
4156 INIT_LIST_HEAD(&sep
->sep_queue_status
);
4158 dev_dbg(&sep
->pdev
->dev
, "sep probe: PCI obtained, "
4159 "device being prepared\n");
4161 /* Set up our register area */
4162 sep
->reg_physical_addr
= pci_resource_start(sep
->pdev
, 0);
4163 if (!sep
->reg_physical_addr
) {
4164 dev_warn(&sep
->pdev
->dev
, "Error getting register start\n");
4166 goto end_function_free_sep_dev
;
4169 sep
->reg_physical_end
= pci_resource_end(sep
->pdev
, 0);
4170 if (!sep
->reg_physical_end
) {
4171 dev_warn(&sep
->pdev
->dev
, "Error getting register end\n");
4173 goto end_function_free_sep_dev
;
4176 sep
->reg_addr
= ioremap_nocache(sep
->reg_physical_addr
,
4177 (size_t)(sep
->reg_physical_end
- sep
->reg_physical_addr
+ 1));
4178 if (!sep
->reg_addr
) {
4179 dev_warn(&sep
->pdev
->dev
, "Error getting register virtual\n");
4181 goto end_function_free_sep_dev
;
4184 dev_dbg(&sep
->pdev
->dev
,
4185 "Register area start %llx end %llx virtual %p\n",
4186 (unsigned long long)sep
->reg_physical_addr
,
4187 (unsigned long long)sep
->reg_physical_end
,
4190 /* Allocate the shared area */
4191 sep
->shared_size
= SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES
+
4192 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
+
4193 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES
+
4194 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES
+
4195 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES
;
4197 if (sep_map_and_alloc_shared_area(sep
)) {
4199 /* Allocation failed */
4200 goto end_function_error
;
4203 /* Clear ICR register */
4204 sep_write_reg(sep
, HW_HOST_ICR_REG_ADDR
, 0xFFFFFFFF);
4206 /* Set the IMR register - open only GPR 2 */
4207 sep_write_reg(sep
, HW_HOST_IMR_REG_ADDR
, (~(0x1 << 13)));
4209 /* Read send/receive counters from SEP */
4210 sep
->reply_ct
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR2_REG_ADDR
);
4211 sep
->reply_ct
&= 0x3FFFFFFF;
4212 sep
->send_ct
= sep
->reply_ct
;
4214 /* Get the interrupt line */
4215 error
= request_irq(pdev
->irq
, sep_inthandler
, IRQF_SHARED
,
4219 goto end_function_deallocate_sep_shared_area
;
4221 /* The new chip requires a shared area reconfigure */
4222 error
= sep_reconfig_shared_area(sep
);
4224 goto end_function_free_irq
;
4228 /* Finally magic up the device nodes */
4229 /* Register driver with the fs */
4230 error
= sep_register_driver_with_fs(sep
);
4233 dev_err(&sep
->pdev
->dev
, "error registering dev file\n");
4234 goto end_function_free_irq
;
4237 sep
->in_use
= 0; /* through touching the device */
4238 #ifdef SEP_ENABLE_RUNTIME_PM
4239 pm_runtime_put_noidle(&sep
->pdev
->dev
);
4240 pm_runtime_allow(&sep
->pdev
->dev
);
4241 pm_runtime_set_autosuspend_delay(&sep
->pdev
->dev
,
4243 pm_runtime_use_autosuspend(&sep
->pdev
->dev
);
4244 pm_runtime_mark_last_busy(&sep
->pdev
->dev
);
4245 sep
->power_save_setup
= 1;
4247 /* register kernel crypto driver */
4248 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4249 error
= sep_crypto_setup();
4251 dev_err(&sep
->pdev
->dev
, "crypto setup failed\n");
4252 goto end_function_free_irq
;
4257 end_function_free_irq
:
4258 free_irq(pdev
->irq
, sep
);
4260 end_function_deallocate_sep_shared_area
:
4261 /* De-allocate shared area */
4262 sep_unmap_and_free_shared_area(sep
);
4265 iounmap(sep
->reg_addr
);
4267 end_function_free_sep_dev
:
4268 pci_dev_put(sep_dev
->pdev
);
4272 end_function_disable_device
:
4273 pci_disable_device(pdev
);
4280 * sep_remove - handles removing device from pci subsystem
4281 * @pdev: pointer to pci device
4283 * This function will handle removing our sep device from pci subsystem on exit
4284 * or unloading this module. It should free up all used resources, and unmap if
4285 * any memory regions mapped.
4287 static void sep_remove(struct pci_dev
*pdev
)
4289 struct sep_device
*sep
= sep_dev
;
4291 /* Unregister from fs */
4292 misc_deregister(&sep
->miscdev_sep
);
4294 /* Unregister from kernel crypto */
4295 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4296 sep_crypto_takedown();
4299 free_irq(sep
->pdev
->irq
, sep
);
4301 /* Free the shared area */
4302 sep_unmap_and_free_shared_area(sep_dev
);
4303 iounmap(sep_dev
->reg_addr
);
4305 #ifdef SEP_ENABLE_RUNTIME_PM
4308 pm_runtime_forbid(&sep
->pdev
->dev
);
4309 pm_runtime_get_noresume(&sep
->pdev
->dev
);
4312 pci_dev_put(sep_dev
->pdev
);
4317 /* Initialize struct pci_device_id for our driver */
4318 static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl
) = {
4319 {PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x0826)},
4320 {PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x08e9)},
4324 /* Export our pci_device_id structure to user space */
4325 MODULE_DEVICE_TABLE(pci
, sep_pci_id_tbl
);
4327 #ifdef SEP_ENABLE_RUNTIME_PM
4330 * sep_pm_resume - rsume routine while waking up from S3 state
4331 * @dev: pointer to sep device
4333 * This function is to be used to wake up sep driver while system awakes from S3
4334 * state i.e. suspend to ram. The RAM in intact.
4335 * Notes - revisit with more understanding of pm, ICR/IMR & counters.
4337 static int sep_pci_resume(struct device
*dev
)
4339 struct sep_device
*sep
= sep_dev
;
4341 dev_dbg(&sep
->pdev
->dev
, "pci resume called\n");
4343 if (sep
->power_state
== SEP_DRIVER_POWERON
)
4346 /* Clear ICR register */
4347 sep_write_reg(sep
, HW_HOST_ICR_REG_ADDR
, 0xFFFFFFFF);
4349 /* Set the IMR register - open only GPR 2 */
4350 sep_write_reg(sep
, HW_HOST_IMR_REG_ADDR
, (~(0x1 << 13)));
4352 /* Read send/receive counters from SEP */
4353 sep
->reply_ct
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR2_REG_ADDR
);
4354 sep
->reply_ct
&= 0x3FFFFFFF;
4355 sep
->send_ct
= sep
->reply_ct
;
4357 sep
->power_state
= SEP_DRIVER_POWERON
;
4363 * sep_pm_suspend - suspend routine while going to S3 state
4364 * @dev: pointer to sep device
4366 * This function is to be used to suspend sep driver while system goes to S3
4367 * state i.e. suspend to ram. The RAM in intact and ON during this suspend.
4368 * Notes - revisit with more understanding of pm, ICR/IMR
4370 static int sep_pci_suspend(struct device
*dev
)
4372 struct sep_device
*sep
= sep_dev
;
4374 dev_dbg(&sep
->pdev
->dev
, "pci suspend called\n");
4375 if (sep
->in_use
== 1)
4378 sep
->power_state
= SEP_DRIVER_POWEROFF
;
4380 /* Clear ICR register */
4381 sep_write_reg(sep
, HW_HOST_ICR_REG_ADDR
, 0xFFFFFFFF);
4383 /* Set the IMR to block all */
4384 sep_write_reg(sep
, HW_HOST_IMR_REG_ADDR
, 0xFFFFFFFF);
4390 * sep_pm_runtime_resume - runtime resume routine
4391 * @dev: pointer to sep device
4393 * Notes - revisit with more understanding of pm, ICR/IMR & counters
4395 static int sep_pm_runtime_resume(struct device
*dev
)
4400 struct sep_device
*sep
= sep_dev
;
4402 dev_dbg(&sep
->pdev
->dev
, "pm runtime resume called\n");
4405 * Wait until the SCU boot is ready
4406 * This is done by iterating SCU_DELAY_ITERATION (10
4407 * microseconds each) up to SCU_DELAY_MAX (50) times.
4408 * This bit can be set in a random time that is less
4409 * than 500 microseconds after each power resume
4413 while ((!retval2
) && (delay_count
< SCU_DELAY_MAX
)) {
4414 retval2
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR3_REG_ADDR
);
4415 retval2
&= 0x00000008;
4417 udelay(SCU_DELAY_ITERATION
);
4423 dev_warn(&sep
->pdev
->dev
, "scu boot bit not set at resume\n");
4427 /* Clear ICR register */
4428 sep_write_reg(sep
, HW_HOST_ICR_REG_ADDR
, 0xFFFFFFFF);
4430 /* Set the IMR register - open only GPR 2 */
4431 sep_write_reg(sep
, HW_HOST_IMR_REG_ADDR
, (~(0x1 << 13)));
4433 /* Read send/receive counters from SEP */
4434 sep
->reply_ct
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR2_REG_ADDR
);
4435 sep
->reply_ct
&= 0x3FFFFFFF;
4436 sep
->send_ct
= sep
->reply_ct
;
4442 * sep_pm_runtime_suspend - runtime suspend routine
4443 * @dev: pointer to sep device
4445 * Notes - revisit with more understanding of pm
4447 static int sep_pm_runtime_suspend(struct device
*dev
)
4449 struct sep_device
*sep
= sep_dev
;
4451 dev_dbg(&sep
->pdev
->dev
, "pm runtime suspend called\n");
4453 /* Clear ICR register */
4454 sep_write_reg(sep
, HW_HOST_ICR_REG_ADDR
, 0xFFFFFFFF);
4459 * sep_pm - power management for sep driver
4460 * @sep_pm_runtime_resume: resume- no communication with cpu & main memory
4461 * @sep_pm_runtime_suspend: suspend- no communication with cpu & main memory
4462 * @sep_pci_suspend: suspend - main memory is still ON
4463 * @sep_pci_resume: resume - main meory is still ON
4465 static const struct dev_pm_ops sep_pm
= {
4466 .runtime_resume
= sep_pm_runtime_resume
,
4467 .runtime_suspend
= sep_pm_runtime_suspend
,
4468 .resume
= sep_pci_resume
,
4469 .suspend
= sep_pci_suspend
,
4471 #endif /* SEP_ENABLE_RUNTIME_PM */
4474 * sep_pci_driver - registers this device with pci subsystem
4475 * @name: name identifier for this driver
4476 * @sep_pci_id_tbl: pointer to struct pci_device_id table
4477 * @sep_probe: pointer to probe function in PCI driver
4478 * @sep_remove: pointer to remove function in PCI driver
4480 static struct pci_driver sep_pci_driver
= {
4481 #ifdef SEP_ENABLE_RUNTIME_PM
4486 .name
= "sep_sec_driver",
4487 .id_table
= sep_pci_id_tbl
,
4489 .remove
= sep_remove
4493 * sep_init - init function
4495 * Module load time. Register the PCI device driver.
4498 static int __init
sep_init(void)
4500 return pci_register_driver(&sep_pci_driver
);
4505 * sep_exit - called to unload driver
4507 * Unregister the driver The device will perform all the cleanup required.
4509 static void __exit
sep_exit(void)
4511 pci_unregister_driver(&sep_pci_driver
);
4515 module_init(sep_init
);
4516 module_exit(sep_exit
);
4518 MODULE_LICENSE("GPL");