// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 Intel Corporation

#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pm_runtime.h>
#include <linux/kthread.h>
#include <linux/init_task.h>
#include <linux/version.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/fs.h>

#include "ipu.h"
#include "ipu-psys.h"
#include "ipu6-ppg.h"
#include "ipu-platform-regs.h"
#include "ipu-trace.h"

#define is_ppg_kcmd(kcmd) (ipu_fw_psys_pg_get_protocol( \
			   (struct ipu_psys_kcmd *)kcmd) \
			   == IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_PPG)

static bool early_pg_transfer;
module_param(early_pg_transfer, bool, 0664);
MODULE_PARM_DESC(early_pg_transfer,
		 "Copy PGs back to user after resource allocation");

bool enable_power_gating = true;
module_param(enable_power_gating, bool, 0664);
MODULE_PARM_DESC(enable_power_gating, "enable power gating");
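/*
 * Both parameters above are registered with 0664 permissions, so with
 * the standard module sysfs layout they should also be adjustable at
 * runtime via /sys/module/<module name>/parameters/ (the exact module
 * name depends on how the driver is built).
 */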

struct ipu_trace_block psys_trace_blocks[] = {
	{
		.offset = IPU_TRACE_REG_PS_TRACE_UNIT_BASE,
		.type = IPU_TRACE_BLOCK_TUN,
	},
	{
		.offset = IPU_TRACE_REG_PS_SPC_EVQ_BASE,
		.type = IPU_TRACE_BLOCK_TM,
	},
	{
		.offset = IPU_TRACE_REG_PS_SPP0_EVQ_BASE,
		.type = IPU_TRACE_BLOCK_TM,
	},
	{
		.offset = IPU_TRACE_REG_PS_SPC_GPC_BASE,
		.type = IPU_TRACE_BLOCK_GPC,
	},
	{
		.offset = IPU_TRACE_REG_PS_SPP0_GPC_BASE,
		.type = IPU_TRACE_BLOCK_GPC,
	},
	{
		.offset = IPU_TRACE_REG_PS_MMU_GPC_BASE,
		.type = IPU_TRACE_BLOCK_GPC,
	},
	{
		.offset = IPU_TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N,
		.type = IPU_TRACE_TIMER_RST,
	},
	{
		.type = IPU_TRACE_BLOCK_END,
	}
};

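/*
 * Route the info requests of the SP's icache master and of its four
 * cmem and four xmem segment masters to the IOSF destination, for the
 * status/control register block at @base.
 */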
static void ipu6_set_sp_info_bits(void *base)
{
	int i;

	writel(IPU_INFO_REQUEST_DESTINATION_IOSF,
	       base + IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER);

	for (i = 0; i < 4; i++)
		writel(IPU_INFO_REQUEST_DESTINATION_IOSF,
		       base + IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(i));
	for (i = 0; i < 4; i++)
		writel(IPU_INFO_REQUEST_DESTINATION_IOSF,
		       base + IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(i));
}

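/*
 * Request all PSYS sub-power-domains up or down, then poll the status
 * register until the hardware clears the busy bit (BIT(31)), giving up
 * after PSYS_SUBDOMAINS_STATUS_WAIT_COUNT polls of 10-20 us each.
 */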
#define PSYS_SUBDOMAINS_STATUS_WAIT_COUNT 1000
void ipu_psys_subdomains_power(struct ipu_psys *psys, bool on)
{
	unsigned int i;
	u32 val;

	/* power domain req */
	dev_dbg(&psys->adev->dev, "power %s psys sub-domains",
		on ? "UP" : "DOWN");
	if (on)
		writel(IPU_PSYS_SUBDOMAINS_POWER_MASK,
		       psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_REQ);
	else
		writel(0x0,
		       psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_REQ);

	i = 0;
	do {
		usleep_range(10, 20);
		val = readl(psys->adev->isp->base +
			    IPU_PSYS_SUBDOMAINS_POWER_STATUS);
		if (!(val & BIT(31))) {
			dev_dbg(&psys->adev->dev,
				"PS sub-domains req done with status 0x%x",
				val);
			break;
		}
		i++;
	} while (i < PSYS_SUBDOMAINS_STATUS_WAIT_COUNT);

	if (i == PSYS_SUBDOMAINS_STATUS_WAIT_COUNT)
		dev_warn(&psys->adev->dev, "Psys sub-domains %s req timeout!",
			 on ? "UP" : "DOWN");
}

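/*
 * One-time PSYS hardware setup: open the SPC access blocker ranges in
 * non-secure mode, route IOMMU and SP info requests to IOSF, and
 * configure, clear and unmask FW interrupt #0 on the GPDEV.
 */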
void ipu_psys_setup_hw(struct ipu_psys *psys)
{
	void __iomem *base = psys->pdata->base;
	void __iomem *spc_regs_base =
		base + psys->pdata->ipdata->hw_variant.spc_offset;
	void *psys_iommu0_ctrl;
	u32 irqs;
	const u8 r3 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG;
	const u8 r4 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR;
	const u8 r5 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL;

	if (!psys->adev->isp->secure_mode) {
		/* configure access blocker for non-secure mode */
		writel(NCI_AB_ACCESS_MODE_RW,
		       base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR +
		       IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r3));
		writel(NCI_AB_ACCESS_MODE_RW,
		       base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR +
		       IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r4));
		writel(NCI_AB_ACCESS_MODE_RW,
		       base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR +
		       IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r5));
	}
	psys_iommu0_ctrl = base +
		psys->pdata->ipdata->hw_variant.mmu_hw[0].offset +
		IPU_MMU_INFO_OFFSET;
	writel(IPU_INFO_REQUEST_DESTINATION_IOSF, psys_iommu0_ctrl);

	ipu6_set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL);
	ipu6_set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPP0_STATUS_CTRL);

	/* Enable FW interrupt #0 */
	writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0));
	irqs = IPU_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0);
	writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_EDGE);
	writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE);
	writel(0xffffffff, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR);
	writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_MASK);
	writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_ENABLE);
}

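/*
 * Find the ppg a kcmd belongs to by matching the token carried in the
 * kcmd's process group against the ppgs of the kcmd's file handle.
 * Returns NULL when no ppg matches.
 */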
static struct ipu_psys_ppg *ipu_psys_identify_kppg(struct ipu_psys_kcmd *kcmd)
{
	struct ipu_psys_scheduler *sched = &kcmd->fh->sched;
	struct ipu_psys_ppg *kppg, *tmp;

	mutex_lock(&kcmd->fh->mutex);
	if (list_empty(&sched->ppgs))
		goto not_found;

	list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) {
		if (ipu_fw_psys_pg_get_token(kcmd)
		    != kppg->token)
			continue;
		mutex_unlock(&kcmd->fh->mutex);
		return kppg;
	}

not_found:
	mutex_unlock(&kcmd->fh->mutex);
	return NULL;
}

/*
 * Called to free up all resources associated with a kcmd.
 * After this the kcmd no longer exists in the driver.
 */
static void ipu_psys_kcmd_free(struct ipu_psys_kcmd *kcmd)
{
	struct ipu_psys_ppg *kppg;
	struct ipu_psys_scheduler *sched;

	if (!kcmd)
		return;

	kppg = ipu_psys_identify_kppg(kcmd);
	sched = &kcmd->fh->sched;

	if (kcmd->kbuf_set) {
		mutex_lock(&sched->bs_mutex);
		kcmd->kbuf_set->buf_set_size = 0;
		mutex_unlock(&sched->bs_mutex);
		kcmd->kbuf_set = NULL;
	}

	if (kppg) {
		mutex_lock(&kppg->mutex);
		if (!list_empty(&kcmd->list))
			list_del(&kcmd->list);
		mutex_unlock(&kppg->mutex);
	}

	kfree(kcmd->pg_manifest);
	kfree(kcmd->kbufs);
	kfree(kcmd->buffers);
	kfree(kcmd);
}

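/*
 * Copy a userspace ipu_psys_command into a freshly allocated kcmd: look
 * up and remap the PG buffer and all terminal buffers, copy in the
 * manifest and buffer descriptors, and derive the initial kcmd state
 * (PPG_START, PPG_STOP or PPG_ENQUEUE). Returns NULL on any failure.
 */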
static struct ipu_psys_kcmd *ipu_psys_copy_cmd(struct ipu_psys_command *cmd,
					       struct ipu_psys_fh *fh)
{
	struct ipu_psys *psys = fh->psys;
	struct ipu_psys_kcmd *kcmd;
	struct ipu_psys_kbuffer *kpgbuf;
	unsigned int i;
	int ret, prevfd, fd;

	fd = prevfd = -1;

	if (cmd->bufcount > IPU_MAX_PSYS_CMD_BUFFERS)
		return NULL;

	if (!cmd->pg_manifest_size)
		return NULL;

	kcmd = kzalloc(sizeof(*kcmd), GFP_KERNEL);
	if (!kcmd)
		return NULL;

	kcmd->state = KCMD_STATE_PPG_NEW;
	kcmd->fh = fh;
	INIT_LIST_HEAD(&kcmd->list);

	mutex_lock(&fh->mutex);
	fd = cmd->pg;
	kpgbuf = ipu_psys_lookup_kbuffer(fh, fd);
	if (!kpgbuf || !kpgbuf->sgt) {
		dev_err(&psys->adev->dev, "%s kbuf %p with fd %d not found.\n",
			__func__, kpgbuf, fd);
		mutex_unlock(&fh->mutex);
		goto error;
	}

	/* check and remap if possible */
	ret = ipu_psys_mapbuf_locked(fd, fh, kpgbuf);
	if (ret) {
		dev_err(&psys->adev->dev, "%s remap failed\n", __func__);
		mutex_unlock(&fh->mutex);
		goto error;
	}

	kpgbuf = ipu_psys_lookup_kbuffer(fh, fd);
	if (!kpgbuf || !kpgbuf->sgt) {
		WARN(1, "kbuf not found or unmapped.\n");
		mutex_unlock(&fh->mutex);
		goto error;
	}
	mutex_unlock(&fh->mutex);

	kcmd->pg_user = kpgbuf->kaddr;
	kcmd->kpg = __get_pg_buf(psys, kpgbuf->len);
	if (!kcmd->kpg)
		goto error;

	memcpy(kcmd->kpg->pg, kcmd->pg_user, kcmd->kpg->pg_size);

	kcmd->pg_manifest = kzalloc(cmd->pg_manifest_size, GFP_KERNEL);
	if (!kcmd->pg_manifest)
		goto error;

	ret = copy_from_user(kcmd->pg_manifest, cmd->pg_manifest,
			     cmd->pg_manifest_size);
	if (ret)
		goto error;

	kcmd->pg_manifest_size = cmd->pg_manifest_size;

	kcmd->user_token = cmd->user_token;
	kcmd->issue_id = cmd->issue_id;
	kcmd->priority = cmd->priority;
	if (kcmd->priority >= IPU_PSYS_CMD_PRIORITY_NUM)
		goto error;

	/* Only the kernel enable bitmap is used */
	memcpy(kcmd->kernel_enable_bitmap, cmd->kernel_enable_bitmap,
	       sizeof(cmd->kernel_enable_bitmap));

	kcmd->nbuffers = ipu_fw_psys_pg_get_terminal_count(kcmd);
	kcmd->buffers = kcalloc(kcmd->nbuffers, sizeof(*kcmd->buffers),
				GFP_KERNEL);
	if (!kcmd->buffers)
		goto error;

	kcmd->kbufs = kcalloc(kcmd->nbuffers, sizeof(kcmd->kbufs[0]),
			      GFP_KERNEL);
	if (!kcmd->kbufs)
		goto error;

	/* a command without buffers must be a stop cmd for a ppg */
	if (!cmd->buffers) {
		kcmd->state = KCMD_STATE_PPG_STOP;
		return kcmd;
	}

	if (!cmd->bufcount || kcmd->nbuffers > cmd->bufcount)
		goto error;

	ret = copy_from_user(kcmd->buffers, cmd->buffers,
			     kcmd->nbuffers * sizeof(*kcmd->buffers));
	if (ret)
		goto error;

	for (i = 0; i < kcmd->nbuffers; i++) {
		struct ipu_fw_psys_terminal *terminal;

		terminal = ipu_fw_psys_pg_get_terminal(kcmd, i);
		if (!terminal)
			continue;

		if (!(kcmd->buffers[i].flags & IPU_BUFFER_FLAG_DMA_HANDLE)) {
			kcmd->state = KCMD_STATE_PPG_START;
			continue;
		}
		if (kcmd->state == KCMD_STATE_PPG_START) {
			dev_err(&psys->adev->dev,
				"err: all buffer.flags&DMA_HANDLE must be 0\n");
			goto error;
		}

		mutex_lock(&fh->mutex);
		fd = kcmd->buffers[i].base.fd;
		kpgbuf = ipu_psys_lookup_kbuffer(fh, fd);
		if (!kpgbuf || !kpgbuf->sgt) {
			dev_err(&psys->adev->dev,
				"%s kcmd->buffers[%d] %p fd %d not found.\n",
				__func__, i, kpgbuf, fd);
			mutex_unlock(&fh->mutex);
			goto error;
		}

		ret = ipu_psys_mapbuf_locked(fd, fh, kpgbuf);
		if (ret) {
			dev_err(&psys->adev->dev, "%s remap failed\n",
				__func__);
			mutex_unlock(&fh->mutex);
			goto error;
		}

		kpgbuf = ipu_psys_lookup_kbuffer(fh, fd);
		if (!kpgbuf || !kpgbuf->sgt) {
			WARN(1, "kbuf not found or unmapped.\n");
			mutex_unlock(&fh->mutex);
			goto error;
		}
		mutex_unlock(&fh->mutex);
		kcmd->kbufs[i] = kpgbuf;
		if (!kcmd->kbufs[i] || !kcmd->kbufs[i]->sgt ||
		    kcmd->kbufs[i]->len < kcmd->buffers[i].bytes_used)
			goto error;
		if ((kcmd->kbufs[i]->flags &
		     IPU_BUFFER_FLAG_NO_FLUSH) ||
		    (kcmd->buffers[i].flags &
		     IPU_BUFFER_FLAG_NO_FLUSH) ||
		    prevfd == kcmd->buffers[i].base.fd)
			continue;

		prevfd = kcmd->buffers[i].base.fd;
		dma_sync_sg_for_device(&psys->adev->dev,
				       kcmd->kbufs[i]->sgt->sgl,
				       kcmd->kbufs[i]->sgt->orig_nents,
				       DMA_BIDIRECTIONAL);
	}

	if (kcmd->state != KCMD_STATE_PPG_START)
		kcmd->state = KCMD_STATE_PPG_ENQUEUE;

	return kcmd;
error:
	ipu_psys_kcmd_free(kcmd);

	dev_dbg(&psys->adev->dev, "failed to copy cmd\n");

	return NULL;
}

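/*
 * Find the buffer set whose IPU virtual address matches @addr by
 * walking the buffer set lists of all open file handles under their
 * bs_mutex. Returns NULL when nothing matches.
 */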
static struct ipu_psys_buffer_set *
ipu_psys_lookup_kbuffer_set(struct ipu_psys *psys, u32 addr)
{
	struct ipu_psys_fh *fh;
	struct ipu_psys_buffer_set *kbuf_set;
	struct ipu_psys_scheduler *sched;

	list_for_each_entry(fh, &psys->fhs, list) {
		sched = &fh->sched;
		mutex_lock(&sched->bs_mutex);
		list_for_each_entry(kbuf_set, &sched->buf_sets, list) {
			if (kbuf_set->buf_set &&
			    kbuf_set->buf_set->ipu_virtual_address == addr) {
				mutex_unlock(&sched->bs_mutex);
				return kbuf_set;
			}
		}
		mutex_unlock(&sched->bs_mutex);
	}

	return NULL;
}

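/*
 * Find the ppg whose process group DMA address matches @pg_addr by
 * walking the ppg lists of all open file handles under fh->mutex.
 */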
static struct ipu_psys_ppg *ipu_psys_lookup_ppg(struct ipu_psys *psys,
						dma_addr_t pg_addr)
{
	struct ipu_psys_scheduler *sched;
	struct ipu_psys_ppg *kppg, *tmp;
	struct ipu_psys_fh *fh;

	list_for_each_entry(fh, &psys->fhs, list) {
		sched = &fh->sched;
		mutex_lock(&fh->mutex);
		if (list_empty(&sched->ppgs)) {
			mutex_unlock(&fh->mutex);
			continue;
		}

		list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) {
			if (pg_addr != kppg->kpg->pg_dma_addr)
				continue;
			mutex_unlock(&fh->mutex);
			return kppg;
		}
		mutex_unlock(&fh->mutex);
	}

	return NULL;
}

/*
 * Move kcmd into the completed state (because it finished running or
 * failed). Fill up the event struct and notify waiters.
 */
void ipu_psys_kcmd_complete(struct ipu_psys_ppg *kppg,
			    struct ipu_psys_kcmd *kcmd, int error)
{
	struct ipu_psys_fh *fh = kcmd->fh;
	struct ipu_psys *psys = fh->psys;

	kcmd->ev.type = IPU_PSYS_EVENT_TYPE_CMD_COMPLETE;
	kcmd->ev.user_token = kcmd->user_token;
	kcmd->ev.issue_id = kcmd->issue_id;
	kcmd->ev.error = error;
	list_move_tail(&kcmd->list, &kppg->kcmds_finished_list);

	if (kcmd->constraint.min_freq)
		ipu_buttress_remove_psys_constraint(psys->adev->isp,
						    &kcmd->constraint);

	if (!early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) {
		struct ipu_psys_kbuffer *kbuf;

		kbuf = ipu_psys_lookup_kbuffer_by_kaddr(kcmd->fh,
							kcmd->pg_user);
		if (kbuf && kbuf->valid)
			memcpy(kcmd->pg_user,
			       kcmd->kpg->pg, kcmd->kpg->pg_size);
		else
			dev_dbg(&psys->adev->dev, "Skipping unmapped buffer\n");
	}

	kcmd->state = KCMD_STATE_PPG_COMPLETE;
	wake_up_interruptible(&fh->wait);
}

/*
 * Submit a kcmd to the psys firmware queue: optionally copy the PG back
 * to the user early, then start and disown the process group. If any
 * step fails, return an error so the caller can complete the kcmd with
 * that error.
 */
int ipu_psys_kcmd_start(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd)
{
	int ret;

	if (psys->adev->isp->flr_done)
		return -EIO;

	if (early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg)
		memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size);

	ret = ipu_fw_psys_pg_start(kcmd);
	if (ret) {
		dev_err(&psys->adev->dev, "failed to start kcmd!\n");
		goto error;
	}

	ipu_fw_psys_pg_dump(psys, kcmd, "run");

	ret = ipu_fw_psys_pg_disown(kcmd);
	if (ret) {
		dev_err(&psys->adev->dev, "failed to disown kcmd!\n");
		goto error;
	}

	return 0;

error:
	dev_err(&psys->adev->dev, "failed to start process group\n");
	return ret;
}

void ipu_psys_watchdog_work(struct work_struct *work)
{
	struct ipu_psys *psys = container_of(work,
					struct ipu_psys, watchdog_work);
	dev_dbg(&psys->adev->dev, "watchdog for ppg not implemented yet!\n");
}

static void ipu_psys_watchdog(struct timer_list *t)
{
	struct ipu_psys_kcmd *kcmd = from_timer(kcmd, t, watchdog);
	struct ipu_psys *psys = kcmd->fh->psys;

	queue_work(IPU_PSYS_WORK_QUEUE, &psys->watchdog_work);
}

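/*
 * Handle the first (start) kcmd of a ppg: allocate and initialize a new
 * kppg plus a command queue resource, copy the manifest, tag both fw PG
 * and kppg with a token derived from the kpg pointer, link the kppg to
 * the file handle's ppg list and queue the kcmd for the scheduler.
 */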
static int ipu_psys_kcmd_send_to_ppg_start(struct ipu_psys_kcmd *kcmd)
{
	struct ipu_psys_fh *fh = kcmd->fh;
	struct ipu_psys_scheduler *sched = &fh->sched;
	struct ipu_psys *psys = fh->psys;
	struct ipu_psys_ppg *kppg;
	struct ipu_psys_resource_pool *rpr;
	int queue_id;
	int ret;

	rpr = &psys->resource_pool_running;

	kppg = kzalloc(sizeof(*kppg), GFP_KERNEL);
	if (!kppg)
		return -ENOMEM;

	kppg->fh = fh;
	kppg->kpg = kcmd->kpg;
	kppg->state = PPG_STATE_START;
	kppg->pri_base = kcmd->priority;
	kppg->pri_dynamic = 0;
	INIT_LIST_HEAD(&kppg->list);

	mutex_init(&kppg->mutex);
	INIT_LIST_HEAD(&kppg->kcmds_new_list);
	INIT_LIST_HEAD(&kppg->kcmds_processing_list);
	INIT_LIST_HEAD(&kppg->kcmds_finished_list);
	INIT_LIST_HEAD(&kppg->sched_list);

	kppg->manifest = kzalloc(kcmd->pg_manifest_size, GFP_KERNEL);
	if (!kppg->manifest) {
		kfree(kppg);
		return -ENOMEM;
	}
	memcpy(kppg->manifest, kcmd->pg_manifest,
	       kcmd->pg_manifest_size);

	queue_id = ipu_psys_allocate_cmd_queue_resource(rpr);
	if (queue_id == -ENOSPC) {
		dev_err(&psys->adev->dev, "no available queue\n");
		kfree(kppg->manifest);
		kfree(kppg);
		return -ENOMEM;
	}

	/*
	 * Set the token: the start cmd will immediately be followed by an
	 * enqueue cmd, so the kppg can be retrieved by token later.
	 */
	kppg->token = (u64)kcmd->kpg;
	ipu_fw_psys_pg_set_token(kcmd, kppg->token);
	ipu_fw_psys_ppg_set_base_queue_id(kcmd, queue_id);
	ret = ipu_fw_psys_pg_set_ipu_vaddress(kcmd,
					      kcmd->kpg->pg_dma_addr);
	if (ret) {
		ipu_psys_free_cmd_queue_resource(rpr, queue_id);
		kfree(kppg->manifest);
		kfree(kppg);
		return -EIO;
	}
	memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size);

	mutex_lock(&fh->mutex);
	list_add_tail(&kppg->list, &sched->ppgs);
	mutex_unlock(&fh->mutex);

	mutex_lock(&kppg->mutex);
	list_add(&kcmd->list, &kppg->kcmds_new_list);
	mutex_unlock(&kppg->mutex);

	dev_dbg(&psys->adev->dev,
		"START ppg(%d, 0x%p) kcmd 0x%p, queue %d\n",
		ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd, queue_id);

	/* Kick l-scheduler thread */
	atomic_set(&psys->wakeup_count, 1);
	wake_up_interruptible(&psys->sched_cmd_wq);

	return 0;
}

static int ipu_psys_kcmd_send_to_ppg(struct ipu_psys_kcmd *kcmd)
{
	struct ipu_psys_fh *fh = kcmd->fh;
	struct ipu_psys *psys = fh->psys;
	struct ipu_psys_ppg *kppg;
	struct ipu_psys_resource_pool *rpr;
	unsigned long flags;
	u8 id;
	bool resche = true;

	rpr = &psys->resource_pool_running;
	if (kcmd->state == KCMD_STATE_PPG_START)
		return ipu_psys_kcmd_send_to_ppg_start(kcmd);

	kppg = ipu_psys_identify_kppg(kcmd);
	spin_lock_irqsave(&psys->pgs_lock, flags);
	kcmd->kpg->pg_size = 0;
	spin_unlock_irqrestore(&psys->pgs_lock, flags);
	if (!kppg) {
		dev_err(&psys->adev->dev, "token does not match\n");
		return -EINVAL;
	}

	kcmd->kpg = kppg->kpg;

	dev_dbg(&psys->adev->dev, "%s ppg(%d, 0x%p) kcmd %p\n",
		(kcmd->state == KCMD_STATE_PPG_STOP) ?
		"STOP" : "ENQUEUE",
		ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd);

	if (kcmd->state == KCMD_STATE_PPG_STOP) {
		mutex_lock(&kppg->mutex);
		if (kppg->state == PPG_STATE_STOPPED) {
			dev_dbg(&psys->adev->dev,
				"kppg 0x%p stopped!\n", kppg);
			id = ipu_fw_psys_ppg_get_base_queue_id(kcmd);
			ipu_psys_free_cmd_queue_resource(rpr, id);
			ipu_psys_kcmd_complete(kppg, kcmd, 0);
			pm_runtime_put(&psys->adev->dev);
			resche = false;
		} else {
			list_add(&kcmd->list, &kppg->kcmds_new_list);
		}
		mutex_unlock(&kppg->mutex);
	} else {
		int ret;

		ret = ipu_psys_ppg_get_bufset(kcmd, kppg);
		if (ret)
			return ret;

		mutex_lock(&kppg->mutex);
		list_add_tail(&kcmd->list, &kppg->kcmds_new_list);
		mutex_unlock(&kppg->mutex);
	}

	if (resche) {
		/* Kick l-scheduler thread */
		atomic_set(&psys->wakeup_count, 1);
		wake_up_interruptible(&psys->sched_cmd_wq);
	}
	return 0;
}

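/*
 * IOC_QCMD entry point: build a kcmd from the userspace command,
 * validate PG size and protocol (only PPGs are supported), apply an
 * optional psys frequency constraint, and hand the kcmd to the ppg
 * machinery.
 */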
int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh)
{
	struct ipu_psys *psys = fh->psys;
	struct ipu_psys_kcmd *kcmd;
	size_t pg_size;
	int ret;

	if (psys->adev->isp->flr_done)
		return -EIO;

	kcmd = ipu_psys_copy_cmd(cmd, fh);
	if (!kcmd)
		return -EINVAL;

	pg_size = ipu_fw_psys_pg_get_size(kcmd);
	if (pg_size > kcmd->kpg->pg_size) {
		dev_dbg(&psys->adev->dev, "pg size mismatch %zu %zu\n",
			pg_size, kcmd->kpg->pg_size);
		ret = -EINVAL;
		goto error;
	}

	if (!is_ppg_kcmd(kcmd)) {
		dev_err(&psys->adev->dev, "legacy pg is not supported\n");
		ret = -EINVAL;
		goto error;
	}

	timer_setup(&kcmd->watchdog, ipu_psys_watchdog, 0);

	if (cmd->min_psys_freq) {
		kcmd->constraint.min_freq = cmd->min_psys_freq;
		ipu_buttress_add_psys_constraint(psys->adev->isp,
						 &kcmd->constraint);
	}

	ret = ipu_psys_kcmd_send_to_ppg(kcmd);
	if (ret)
		goto error;

	dev_dbg(&psys->adev->dev,
		"IOC_QCMD: user_token:%llx issue_id:0x%llx pri:%d\n",
		cmd->user_token, cmd->issue_id, cmd->priority);

	return 0;

error:
	ipu_psys_kcmd_free(kcmd);

	return ret;
}

static bool ipu_psys_kcmd_is_valid(struct ipu_psys *psys,
				   struct ipu_psys_kcmd *kcmd)
{
	struct ipu_psys_fh *fh;
	struct ipu_psys_kcmd *kcmd0;
	struct ipu_psys_ppg *kppg, *tmp;
	struct ipu_psys_scheduler *sched;

	list_for_each_entry(fh, &psys->fhs, list) {
		sched = &fh->sched;
		mutex_lock(&fh->mutex);
		if (list_empty(&sched->ppgs)) {
			mutex_unlock(&fh->mutex);
			continue;
		}
		list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) {
			mutex_lock(&kppg->mutex);
			list_for_each_entry(kcmd0,
					    &kppg->kcmds_processing_list,
					    list) {
				if (kcmd0 == kcmd) {
					mutex_unlock(&kppg->mutex);
					mutex_unlock(&fh->mutex);
					return true;
				}
			}
			mutex_unlock(&kppg->mutex);
		}
		mutex_unlock(&fh->mutex);
	}

	return false;
}

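/*
 * Drain the firmware event queue. For RUN events the kcmd is resolved
 * through its buffer set; for STOP/SUSPEND/RESUME events the ppg is
 * resolved through its PG address. The ppg is then advanced and, if the
 * kcmd is still on a processing list, completed towards userspace.
 */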
void ipu_psys_handle_events(struct ipu_psys *psys)
{
	struct ipu_psys_kcmd *kcmd;
	struct ipu_fw_psys_event event;
	struct ipu_psys_ppg *kppg;
	bool error;
	u32 hdl;
	u16 cmd, status;
	int res;

	do {
		memset(&event, 0, sizeof(event));
		if (!ipu_fw_psys_rcv_event(psys, &event))
			break;

		if (!event.context_handle)
			break;

		dev_dbg(&psys->adev->dev, "ppg event: 0x%x, %d, status %d\n",
			event.context_handle, event.command, event.status);

		error = false;
		/*
		 * event.command == CMD_RUN means the fw has finished
		 * processing a frame in PPG mode, and event.context_handle
		 * is the IPU virtual address of the buffer set; use it to
		 * look up the kbuffer_set and from that the kcmd.
		 */
		hdl = event.context_handle;
		cmd = event.command;
		status = event.status;

		kppg = NULL;
		kcmd = NULL;
		if (cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN) {
			struct ipu_psys_buffer_set *kbuf_set;
			/*
			 * The ppg state needs to change when the first run
			 * completes (after the PPG was started/resumed).
			 */
			kbuf_set = ipu_psys_lookup_kbuffer_set(psys, hdl);
			if (kbuf_set)
				kcmd = kbuf_set->kcmd;
			if (!kbuf_set || !kcmd)
				error = true;
			else
				kppg = ipu_psys_identify_kppg(kcmd);
		} else if (cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP ||
			   cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND ||
			   cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME) {
			/*
			 * STOP/SUSPEND/RESUME cmd events take this branch;
			 * only a stop cmd queued by the user has a stop
			 * kcmd, for which the user must be notified to
			 * dequeue.
			 */
			kppg = ipu_psys_lookup_ppg(psys, hdl);
			if (kppg) {
				mutex_lock(&kppg->mutex);
				if (kppg->state == PPG_STATE_STOPPING) {
					kcmd = ipu_psys_ppg_get_stop_kcmd(kppg);
					if (!kcmd)
						error = true;
				}
				mutex_unlock(&kppg->mutex);
			}
		} else {
			dev_err(&psys->adev->dev, "invalid event\n");
			continue;
		}

		if (error || !kppg) {
			dev_err(&psys->adev->dev, "event error, command %d\n",
				cmd);
			break;
		}

		dev_dbg(&psys->adev->dev, "event to kppg 0x%p, kcmd 0x%p\n",
			kppg, kcmd);

		ipu_psys_ppg_complete(psys, kppg);

		if (kcmd && ipu_psys_kcmd_is_valid(psys, kcmd)) {
			res = (status == IPU_PSYS_EVENT_CMD_COMPLETE ||
			       status == IPU_PSYS_EVENT_FRAGMENT_COMPLETE) ?
			      0 : -EIO;
			mutex_lock(&kppg->mutex);
			ipu_psys_kcmd_complete(kppg, kcmd, res);
			mutex_unlock(&kppg->mutex);
		}
	} while (1);
}

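/*
 * Per-open-file initialization: set up the scheduler lists and
 * preallocate a DMA-mapped pool of IPU_PSYS_BUF_SET_POOL_SIZE buffer
 * sets for command buffer sets; on failure, free whatever part of the
 * pool was already allocated.
 */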
int ipu_psys_fh_init(struct ipu_psys_fh *fh)
{
	struct ipu_psys *psys = fh->psys;
	struct ipu_psys_buffer_set *kbuf_set, *kbuf_set_tmp;
	struct ipu_psys_scheduler *sched = &fh->sched;
	int i;

	mutex_init(&sched->bs_mutex);
	INIT_LIST_HEAD(&sched->buf_sets);
	INIT_LIST_HEAD(&sched->ppgs);
	pm_runtime_dont_use_autosuspend(&psys->adev->dev);
	/* allocate and map memory for buf_sets */
	for (i = 0; i < IPU_PSYS_BUF_SET_POOL_SIZE; i++) {
		kbuf_set = kzalloc(sizeof(*kbuf_set), GFP_KERNEL);
		if (!kbuf_set)
			goto out_free_buf_sets;
		kbuf_set->kaddr = dma_alloc_attrs(&psys->adev->dev,
						  IPU_PSYS_BUF_SET_MAX_SIZE,
						  &kbuf_set->dma_addr,
						  GFP_KERNEL,
						  0);
		if (!kbuf_set->kaddr) {
			kfree(kbuf_set);
			goto out_free_buf_sets;
		}
		kbuf_set->size = IPU_PSYS_BUF_SET_MAX_SIZE;
		list_add(&kbuf_set->list, &sched->buf_sets);
	}

	return 0;

out_free_buf_sets:
	list_for_each_entry_safe(kbuf_set, kbuf_set_tmp,
				 &sched->buf_sets, list) {
		dma_free_attrs(&psys->adev->dev,
			       kbuf_set->size, kbuf_set->kaddr,
			       kbuf_set->dma_addr, 0);
		list_del(&kbuf_set->list);
		kfree(kbuf_set);
	}
	mutex_destroy(&sched->bs_mutex);

	return -ENOMEM;
}

int ipu_psys_fh_deinit(struct ipu_psys_fh *fh)
{
	struct ipu_psys *psys = fh->psys;
	struct ipu_psys_ppg *kppg, *kppg0;
	struct ipu_psys_kcmd *kcmd, *kcmd0;
	struct ipu_psys_buffer_set *kbuf_set, *kbuf_set0;
	struct ipu_psys_scheduler *sched = &fh->sched;
	struct ipu_psys_resource_pool *rpr;
	struct ipu_psys_resource_alloc *alloc;
	u8 id;

	mutex_lock(&fh->mutex);
	if (!list_empty(&sched->ppgs)) {
		list_for_each_entry_safe(kppg, kppg0, &sched->ppgs, list) {
			unsigned long flags;

			mutex_lock(&kppg->mutex);
			if (!(kppg->state &
			      (PPG_STATE_STOPPED |
			       PPG_STATE_STOPPING))) {
				struct ipu_psys_kcmd tmp = {
					.kpg = kppg->kpg,
				};

				rpr = &psys->resource_pool_running;
				alloc = &kppg->kpg->resource_alloc;
				id = ipu_fw_psys_ppg_get_base_queue_id(&tmp);
				ipu_psys_ppg_stop(kppg);
				ipu_psys_free_resources(alloc, rpr);
				ipu_psys_free_cmd_queue_resource(rpr, id);
				dev_dbg(&psys->adev->dev,
					"s_change:%s %p %d -> %d\n", __func__,
					kppg, kppg->state, PPG_STATE_STOPPED);
				kppg->state = PPG_STATE_STOPPED;
				if (psys->power_gating != PSYS_POWER_GATED)
					pm_runtime_put(&psys->adev->dev);
			}
			list_del(&kppg->list);
			mutex_unlock(&kppg->mutex);

			list_for_each_entry_safe(kcmd, kcmd0,
						 &kppg->kcmds_new_list, list) {
				kcmd->pg_user = NULL;
				mutex_unlock(&fh->mutex);
				ipu_psys_kcmd_free(kcmd);
				mutex_lock(&fh->mutex);
			}

			list_for_each_entry_safe(kcmd, kcmd0,
						 &kppg->kcmds_processing_list,
						 list) {
				kcmd->pg_user = NULL;
				mutex_unlock(&fh->mutex);
				ipu_psys_kcmd_free(kcmd);
				mutex_lock(&fh->mutex);
			}

			list_for_each_entry_safe(kcmd, kcmd0,
						 &kppg->kcmds_finished_list,
						 list) {
				kcmd->pg_user = NULL;
				mutex_unlock(&fh->mutex);
				ipu_psys_kcmd_free(kcmd);
				mutex_lock(&fh->mutex);
			}

			spin_lock_irqsave(&psys->pgs_lock, flags);
			kppg->kpg->pg_size = 0;
			spin_unlock_irqrestore(&psys->pgs_lock, flags);

			mutex_destroy(&kppg->mutex);
			kfree(kppg->manifest);
			kfree(kppg);
		}
	}
	mutex_unlock(&fh->mutex);

	mutex_lock(&sched->bs_mutex);
	list_for_each_entry_safe(kbuf_set, kbuf_set0, &sched->buf_sets, list) {
		dma_free_attrs(&psys->adev->dev,
			       kbuf_set->size, kbuf_set->kaddr,
			       kbuf_set->dma_addr, 0);
		list_del(&kbuf_set->list);
		kfree(kbuf_set);
	}
	mutex_unlock(&sched->bs_mutex);
	mutex_destroy(&sched->bs_mutex);

	return 0;
}

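/*
 * Return the first finished kcmd of the first ppg that has one, or
 * NULL if nothing has completed. The kcmd is left on the finished list;
 * it is removed only when the event is dequeued and the kcmd freed.
 */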
struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh)
{
	struct ipu_psys_scheduler *sched = &fh->sched;
	struct ipu_psys_kcmd *kcmd;
	struct ipu_psys_ppg *kppg;

	mutex_lock(&fh->mutex);
	if (list_empty(&sched->ppgs)) {
		mutex_unlock(&fh->mutex);
		return NULL;
	}

	list_for_each_entry(kppg, &sched->ppgs, list) {
		mutex_lock(&kppg->mutex);
		if (list_empty(&kppg->kcmds_finished_list)) {
			mutex_unlock(&kppg->mutex);
			continue;
		}

		kcmd = list_first_entry(&kppg->kcmds_finished_list,
					struct ipu_psys_kcmd, list);
		mutex_unlock(&fh->mutex);
		mutex_unlock(&kppg->mutex);
		dev_dbg(&fh->psys->adev->dev,
			"get completed kcmd 0x%p\n", kcmd);
		return kcmd;
	}
	mutex_unlock(&fh->mutex);

	return NULL;
}

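/*
 * IOC_DQEVENT entry point: wait for a completed kcmd (unless O_NONBLOCK
 * is set), copy its event to the caller and free the kcmd.
 */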
long ipu_ioctl_dqevent(struct ipu_psys_event *event,
		       struct ipu_psys_fh *fh, unsigned int f_flags)
{
	struct ipu_psys *psys = fh->psys;
	struct ipu_psys_kcmd *kcmd = NULL;
	int rval;

	dev_dbg(&psys->adev->dev, "IOC_DQEVENT\n");

	if (!(f_flags & O_NONBLOCK)) {
		rval = wait_event_interruptible(fh->wait,
						(kcmd =
						 ipu_get_completed_kcmd(fh)));
		if (rval == -ERESTARTSYS)
			return rval;
	}

	if (!kcmd) {
		kcmd = ipu_get_completed_kcmd(fh);
		if (!kcmd)
			return -ENODATA;
	}

	*event = kcmd->ev;
	ipu_psys_kcmd_free(kcmd);

	return 0;
}