/*
 * Copyright (C) 2016 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include "cptvf.h"
#include "request_manager.h"

/**
 * get_free_pending_entry - get a free entry from the pending queue
 * @q: pending queue to pick the entry from
 * @qlen: length of the pending queue
 */
static struct pending_entry *get_free_pending_entry(struct pending_queue *q,
                                                    int qlen)
{
        struct pending_entry *ent = NULL;

        ent = &q->head[q->rear];
        if (unlikely(ent->busy)) {
                ent = NULL;
                goto no_free_entry;
        }

        q->rear++;
        if (unlikely(q->rear == qlen))
                q->rear = 0;

no_free_entry:
        return ent;
}

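/*
 * The pending queue is a circular buffer: front chases rear, and both
 * wrap back to slot zero once they reach qlen.
 */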
static inline void pending_queue_inc_front(struct pending_qinfo *pqinfo,
                                           int qno)
{
        struct pending_queue *queue = &pqinfo->queue[qno];

        queue->front++;
        if (unlikely(queue->front == pqinfo->qlen))
                queue->front = 0;
}

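/**
 * setup_sgio_components - map buffers and build a hardware SG component list
 * @cptvf: virtual function handle
 * @list: array of buffer pointers to map
 * @buf_count: number of entries in @list
 * @buffer: destination for the packed sglist_component records
 *
 * DMA-maps every buffer in @list, then packs the (length, address) pairs,
 * four per sglist_component, in the big-endian layout the engine expects
 * (hence the cpu_to_be*() conversions below).
 */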
static int setup_sgio_components(struct cpt_vf *cptvf, struct buf_ptr *list,
                                 int buf_count, u8 *buffer)
{
        int ret = 0, i, j;
        int components;
        struct sglist_component *sg_ptr = NULL;
        struct pci_dev *pdev = cptvf->pdev;

        if (unlikely(!list)) {
                dev_err(&pdev->dev, "Input List pointer is NULL\n");
                return -EFAULT;
        }

        for (i = 0; i < buf_count; i++) {
                if (likely(list[i].vptr)) {
                        list[i].dma_addr = dma_map_single(&pdev->dev,
                                                          list[i].vptr,
                                                          list[i].size,
                                                          DMA_BIDIRECTIONAL);
                        if (unlikely(dma_mapping_error(&pdev->dev,
                                                       list[i].dma_addr))) {
                                dev_err(&pdev->dev, "DMA map kernel buffer failed for component: %d\n",
                                        i);
                                ret = -EIO;
                                goto sg_cleanup;
                        }
                }
        }

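        /*
         * Pack complete groups of four (length, address) pairs first;
         * the switch below fills in a remainder of one to three entries,
         * falling through so higher slots are written before lower ones.
         */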
        components = buf_count / 4;
        sg_ptr = (struct sglist_component *)buffer;
        for (i = 0; i < components; i++) {
                sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
                sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
                sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
                sg_ptr->u.s.len3 = cpu_to_be16(list[i * 4 + 3].size);
                sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
                sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
                sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
                sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
                sg_ptr++;
        }

        components = buf_count % 4;

        switch (components) {
        case 3:
                sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
                sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
                /* Fall through */
        case 2:
                sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
                sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
                /* Fall through */
        case 1:
                sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
                sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
                break;
        default:
                break;
        }

        return ret;

sg_cleanup:
        for (j = 0; j < i; j++) {
                if (list[j].dma_addr) {
                        dma_unmap_single(&pdev->dev, list[j].dma_addr,
                                         list[j].size, DMA_BIDIRECTIONAL);
                }

                list[j].dma_addr = 0;
        }

        return ret;
}

static inline int setup_sgio_list(struct cpt_vf *cptvf,
                                  struct cpt_info_buffer *info,
                                  struct cpt_request_info *req)
{
        u16 g_sz_bytes = 0, s_sz_bytes = 0;
        int ret = 0;
        struct pci_dev *pdev = cptvf->pdev;

        if (req->incnt > MAX_SG_IN_CNT || req->outcnt > MAX_SG_OUT_CNT) {
                dev_err(&pdev->dev, "Request SG components are higher than supported\n");
                ret = -EINVAL;
                goto scatter_gather_clean;
        }

        /* Setup gather (input) components */
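        /* Round up to whole sglist_components; each one packs four entries. */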
        g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
        info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL);
        if (!info->gather_components) {
                ret = -ENOMEM;
                goto scatter_gather_clean;
        }

        ret = setup_sgio_components(cptvf, req->in,
                                    req->incnt,
                                    info->gather_components);
        if (ret) {
                dev_err(&pdev->dev, "Failed to setup gather list\n");
                ret = -EFAULT;
                goto scatter_gather_clean;
        }

        /* Setup scatter (output) components */
        s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
        info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL);
        if (!info->scatter_components) {
                ret = -ENOMEM;
                goto scatter_gather_clean;
        }

        ret = setup_sgio_components(cptvf, req->out,
                                    req->outcnt,
                                    info->scatter_components);
        if (ret) {
                dev_err(&pdev->dev, "Failed to setup scatter list\n");
                ret = -EFAULT;
                goto scatter_gather_clean;
        }

        /* Create and initialize DPTR */
        info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
        info->in_buffer = kzalloc(info->dlen, GFP_KERNEL);
        if (!info->in_buffer) {
                ret = -ENOMEM;
                goto scatter_gather_clean;
        }

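        /*
         * The first 8 bytes of DPTR form the SG header: scatter count,
         * gather count and two reserved u16s, byte-swapped below as a
         * single 64-bit word so the engine sees big-endian fields.
         */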
        ((u16 *)info->in_buffer)[0] = req->outcnt;
        ((u16 *)info->in_buffer)[1] = req->incnt;
        ((u16 *)info->in_buffer)[2] = 0;
        ((u16 *)info->in_buffer)[3] = 0;
        *(u64 *)info->in_buffer = cpu_to_be64p((u64 *)info->in_buffer);

        memcpy(&info->in_buffer[8], info->gather_components,
               g_sz_bytes);
        memcpy(&info->in_buffer[8 + g_sz_bytes],
               info->scatter_components, s_sz_bytes);

        info->dptr_baddr = dma_map_single(&pdev->dev,
                                          (void *)info->in_buffer,
                                          info->dlen,
                                          DMA_BIDIRECTIONAL);
        if (dma_mapping_error(&pdev->dev, info->dptr_baddr)) {
                dev_err(&pdev->dev, "Mapping DPTR Failed %d\n", info->dlen);
                ret = -EIO;
                goto scatter_gather_clean;
        }

        /* Create and initialize RPTR */
        info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL);
        if (!info->out_buffer) {
                ret = -ENOMEM;
                goto scatter_gather_clean;
        }

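        /*
         * Seed RPTR with the inverse of the completion marker; while the
         * alternate completion address still holds this value, the poller
         * (process_pending_queue) may grant the request extra time.
         */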
        *((u64 *)info->out_buffer) = ~((u64)COMPLETION_CODE_INIT);
        info->alternate_caddr = (u64 *)info->out_buffer;
        info->rptr_baddr = dma_map_single(&pdev->dev,
                                          (void *)info->out_buffer,
                                          COMPLETION_CODE_SIZE,
                                          DMA_BIDIRECTIONAL);
        if (dma_mapping_error(&pdev->dev, info->rptr_baddr)) {
                dev_err(&pdev->dev, "Mapping RPTR Failed %d\n",
                        COMPLETION_CODE_SIZE);
                ret = -EIO;
                goto scatter_gather_clean;
        }

        return 0;

scatter_gather_clean:
        return ret;
}

int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
                     u32 qno)
{
        struct pci_dev *pdev = cptvf->pdev;
        struct command_qinfo *qinfo = NULL;
        struct command_queue *queue;
        struct command_chunk *chunk;
        u8 *ent;
        int ret = 0;

        if (unlikely(qno >= cptvf->nr_queues)) {
                dev_err(&pdev->dev, "Invalid queue (qno: %d, nr_queues: %d)\n",
                        qno, cptvf->nr_queues);
                return -EINVAL;
        }

        qinfo = &cptvf->cqinfo;
        queue = &qinfo->queue[qno];
        /* lock command queue */
        spin_lock(&queue->lock);
        ent = &queue->qhead->head[queue->idx * qinfo->cmd_size];
        memcpy(ent, (void *)cmd, qinfo->cmd_size);

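        /*
         * Each command occupies a 64-byte slot, so a chunk holds
         * size / 64 commands; once it fills up, move qhead to the next
         * chunk in the list and restart the slot index.
         */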
        if (++queue->idx >= queue->qhead->size / 64) {
                struct hlist_node *node;

                hlist_for_each(node, &queue->chead) {
                        chunk = hlist_entry(node, struct command_chunk,
                                            nextchunk);
                        if (chunk == queue->qhead) {
                                continue;
                        } else {
                                queue->qhead = chunk;
                                break;
                        }
                }
                queue->idx = 0;
        }
        /* make sure all memory stores are done before ringing doorbell */
        smp_wmb();
        cptvf_write_vq_doorbell(cptvf, 1);
        /* unlock command queue */
        spin_unlock(&queue->lock);

        return ret;
}

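/**
 * do_request_cleanup - unmap all DMA buffers and free request metadata
 * @cptvf: virtual function that owned the request
 * @info: per-request bookkeeping to tear down
 *
 * Safe to call on a partially constructed @info: every unmap is guarded
 * by a NULL/zero check and kzfree() ignores NULL pointers.
 */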
void do_request_cleanup(struct cpt_vf *cptvf,
                        struct cpt_info_buffer *info)
{
        int i;
        struct pci_dev *pdev = cptvf->pdev;
        struct cpt_request_info *req;

        if (info->dptr_baddr)
                dma_unmap_single(&pdev->dev, info->dptr_baddr,
                                 info->dlen, DMA_BIDIRECTIONAL);

        if (info->rptr_baddr)
                dma_unmap_single(&pdev->dev, info->rptr_baddr,
                                 COMPLETION_CODE_SIZE, DMA_BIDIRECTIONAL);

        if (info->comp_baddr)
                dma_unmap_single(&pdev->dev, info->comp_baddr,
                                 sizeof(union cpt_res_s), DMA_BIDIRECTIONAL);

        if (info->req) {
                req = info->req;
                for (i = 0; i < req->outcnt; i++) {
                        if (req->out[i].dma_addr)
                                dma_unmap_single(&pdev->dev,
                                                 req->out[i].dma_addr,
                                                 req->out[i].size,
                                                 DMA_BIDIRECTIONAL);
                }

                for (i = 0; i < req->incnt; i++) {
                        if (req->in[i].dma_addr)
                                dma_unmap_single(&pdev->dev,
                                                 req->in[i].dma_addr,
                                                 req->in[i].size,
                                                 DMA_BIDIRECTIONAL);
                }
        }

        kzfree(info->scatter_components);
        kzfree(info->gather_components);
        kzfree(info->out_buffer);
        kzfree(info->in_buffer);
        kzfree((void *)info->completion_addr);
        kzfree(info);
}

void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
{
        struct pci_dev *pdev = cptvf->pdev;

        if (!info) {
                dev_err(&pdev->dev, "incorrect cpt_info_buffer for post processing\n");
                return;
        }

        do_request_cleanup(cptvf, info);
}

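/*
 * Walk the pending queue from the front, retiring entries in order:
 * hardware faults and software errors are cleaned up immediately,
 * unfinished requests either time out or are granted extra time via
 * the alternate completion marker, and completed requests are cleaned
 * up before their callback runs.
 */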
static inline void process_pending_queue(struct cpt_vf *cptvf,
                                         struct pending_qinfo *pqinfo,
                                         int qno)
{
        struct pci_dev *pdev = cptvf->pdev;
        struct pending_queue *pqueue = &pqinfo->queue[qno];
        struct pending_entry *pentry = NULL;
        struct cpt_info_buffer *info = NULL;
        union cpt_res_s *status = NULL;
        unsigned char ccode;

        while (1) {
                spin_lock_bh(&pqueue->lock);
                pentry = &pqueue->head[pqueue->front];
                if (unlikely(!pentry->busy)) {
                        spin_unlock_bh(&pqueue->lock);
                        break;
                }

                info = (struct cpt_info_buffer *)pentry->post_arg;
                if (unlikely(!info)) {
                        dev_err(&pdev->dev, "Pending Entry post arg NULL\n");
                        pending_queue_inc_front(pqinfo, qno);
                        spin_unlock_bh(&pqueue->lock);
                        continue;
                }

                status = (union cpt_res_s *)pentry->completion_addr;
                ccode = status->s.compcode;
                if ((status->s.compcode == CPT_COMP_E_FAULT) ||
                    (status->s.compcode == CPT_COMP_E_SWERR)) {
                        dev_err(&pdev->dev, "Request failed with %s\n",
                                (status->s.compcode == CPT_COMP_E_FAULT) ?
                                "DMA Fault" : "Software error");
                        pentry->completion_addr = NULL;
                        pentry->busy = false;
                        atomic64_dec((&pqueue->pending_count));
                        pentry->post_arg = NULL;
                        pending_queue_inc_front(pqinfo, qno);
                        do_request_cleanup(cptvf, info);
                        spin_unlock_bh(&pqueue->lock);
                        break;
                } else if (status->s.compcode == COMPLETION_CODE_INIT) {
                        /* check for timeout */
                        if (time_after_eq(jiffies,
                                          (info->time_in +
                                          (CPT_COMMAND_TIMEOUT * HZ)))) {
                                dev_err(&pdev->dev, "Request timed out\n");
                                pentry->completion_addr = NULL;
                                pentry->busy = false;
                                atomic64_dec((&pqueue->pending_count));
                                pentry->post_arg = NULL;
                                pending_queue_inc_front(pqinfo, qno);
                                do_request_cleanup(cptvf, info);
                                spin_unlock_bh(&pqueue->lock);
                                break;
                        } else if ((*info->alternate_caddr ==
                                    (~COMPLETION_CODE_INIT)) &&
                                   (info->extra_time < TIME_IN_RESET_COUNT)) {
                                info->time_in = jiffies;
                                info->extra_time++;
                                spin_unlock_bh(&pqueue->lock);
                                break;
                        }
                }

                pentry->completion_addr = NULL;
                pentry->busy = false;
                pentry->post_arg = NULL;
                atomic64_dec((&pqueue->pending_count));
                pending_queue_inc_front(pqinfo, qno);
                spin_unlock_bh(&pqueue->lock);

                do_post_process(info->cptvf, info);
                /*
                 * Calling callback after we find
                 * that the request has been serviced
                 */
                pentry->callback(ccode, pentry->callback_arg);
        }
}

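/*
 * Submission path: build the gather/scatter lists, allocate and map a
 * cpt_res_s completion word, reserve a pending-queue entry for the
 * poller, then post a CPT_INST_S to the command queue and ring the
 * doorbell via send_cpt_command().
 */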
int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
{
        int ret = 0, clear = 0, queue = 0;
        struct cpt_info_buffer *info = NULL;
        struct cptvf_request *cpt_req = NULL;
        union ctrl_info *ctrl = NULL;
        union cpt_res_s *result = NULL;
        struct pending_entry *pentry = NULL;
        struct pending_queue *pqueue = NULL;
        struct pci_dev *pdev = cptvf->pdev;
        u8 group = 0;
        struct cpt_vq_command vq_cmd;
        union cpt_inst_s cptinst;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (unlikely(!info)) {
                dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
                return -ENOMEM;
        }

        cpt_req = (struct cptvf_request *)&req->req;
        ctrl = (union ctrl_info *)&req->ctrl;

        info->cptvf = cptvf;
        group = ctrl->s.grp;
        ret = setup_sgio_list(cptvf, info, req);
        if (ret) {
                dev_err(&pdev->dev, "Setting up SG list failed\n");
                goto request_cleanup;
        }

        cpt_req->dlen = info->dlen;
        /*
         * Get buffer for union cpt_res_s response
         * structure and its physical address
         */
        info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
        if (unlikely(!info->completion_addr)) {
                dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
                ret = -ENOMEM;
                goto request_cleanup;
        }

        result = (union cpt_res_s *)info->completion_addr;
        result->s.compcode = COMPLETION_CODE_INIT;
        info->comp_baddr = dma_map_single(&pdev->dev,
                                          (void *)info->completion_addr,
                                          sizeof(union cpt_res_s),
                                          DMA_BIDIRECTIONAL);
        if (dma_mapping_error(&pdev->dev, info->comp_baddr)) {
                dev_err(&pdev->dev, "mapping compptr Failed %lu\n",
                        sizeof(union cpt_res_s));
                ret = -EFAULT;
                goto request_cleanup;
        }

        /* Fill the VQ command */
        vq_cmd.cmd.u64 = 0;
        vq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
        vq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
        vq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
        vq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);

        /* 64-bit swap for microcode data reads, not needed for addresses */
        vq_cmd.cmd.u64 = cpu_to_be64(vq_cmd.cmd.u64);
        vq_cmd.dptr = info->dptr_baddr;
        vq_cmd.rptr = info->rptr_baddr;
        vq_cmd.cptr.u64 = 0;
        vq_cmd.cptr.s.grp = group;
        /* Get Pending Entry to submit command */
        /* Always queue 0, because 1 queue per VF */
        queue = 0;
        pqueue = &cptvf->pqinfo.queue[queue];

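        /*
         * Apply backpressure: if too many requests are already in flight,
         * drain completions inline before queueing another one.
         */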
        if (atomic64_read(&pqueue->pending_count) > PENDING_THOLD) {
                dev_err(&pdev->dev, "pending threshold reached\n");
                process_pending_queue(cptvf, &cptvf->pqinfo, queue);
        }

get_pending_entry:
        spin_lock_bh(&pqueue->lock);
        pentry = get_free_pending_entry(pqueue, cptvf->pqinfo.qlen);
        if (unlikely(!pentry)) {
                spin_unlock_bh(&pqueue->lock);
                if (clear == 0) {
                        process_pending_queue(cptvf, &cptvf->pqinfo, queue);
                        clear = 1;
                        goto get_pending_entry;
                }
                dev_err(&pdev->dev, "Get free entry failed\n");
                dev_err(&pdev->dev, "queue: %d, rear: %d, front: %d\n",
                        queue, pqueue->rear, pqueue->front);
                ret = -EFAULT;
                goto request_cleanup;
        }

        pentry->completion_addr = info->completion_addr;
        pentry->post_arg = (void *)info;
        pentry->callback = req->callback;
        pentry->callback_arg = req->callback_arg;
        info->pentry = pentry;
        pentry->busy = true;
        atomic64_inc(&pqueue->pending_count);

        /* Send CPT command */
        info->time_in = jiffies;
        info->req = req;

        /* Create the CPT_INST_S type command for HW interpretation */
        cptinst.s.doneint = true;
        cptinst.s.res_addr = (u64)info->comp_baddr;
        cptinst.s.tag = 0;
        cptinst.s.grp = 0;
        cptinst.s.wq_ptr = 0;
        cptinst.s.ei0 = vq_cmd.cmd.u64;
        cptinst.s.ei1 = vq_cmd.dptr;
        cptinst.s.ei2 = vq_cmd.rptr;
        cptinst.s.ei3 = vq_cmd.cptr.u64;

        ret = send_cpt_command(cptvf, &cptinst, queue);
        spin_unlock_bh(&pqueue->lock);
        if (unlikely(ret)) {
                dev_err(&pdev->dev, "Send command failed for AE\n");
                ret = -EFAULT;
                goto request_cleanup;
        }

        return 0;

request_cleanup:
        dev_dbg(&pdev->dev, "Failed to submit CPT command\n");
        do_request_cleanup(cptvf, info);

        return ret;
}

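/**
 * vq_post_process - drain completed requests for one pending queue
 * @cptvf: virtual function handle
 * @qno: pending queue number to service
 */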
void vq_post_process(struct cpt_vf *cptvf, u32 qno)
{
        struct pci_dev *pdev = cptvf->pdev;

        if (unlikely(qno >= cptvf->nr_queues)) {
                dev_err(&pdev->dev, "Request for post processing on invalid pending queue: %u\n",
                        qno);
                return;
        }

        process_pending_queue(cptvf, &cptvf->pqinfo, qno);
}

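/*
 * Entry point for callers outside this file: checks device readiness
 * and that the request type matches the VF's engine type (SE vs AE)
 * before handing off to process_request().
 */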
int cptvf_do_request(void *vfdev, struct cpt_request_info *req)
{
        struct cpt_vf *cptvf = (struct cpt_vf *)vfdev;
        struct pci_dev *pdev = cptvf->pdev;

        if (!cpt_device_ready(cptvf)) {
                dev_err(&pdev->dev, "CPT Device is not ready\n");
                return -ENODEV;
        }

        if ((cptvf->vftype == SE_TYPES) && (!req->ctrl.s.se_req)) {
                dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request\n",
                        cptvf->vfid);
                return -EINVAL;
        } else if ((cptvf->vftype == AE_TYPES) && (req->ctrl.s.se_req)) {
                dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request\n",
                        cptvf->vfid);
                return -EINVAL;
        }

        return process_request(cptvf, req);
}