/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include <linux/kthread.h>
25 #include <linux/wait.h>
26 #include <linux/sched.h>
28 #include "gpu_scheduler.h"
30 struct amd_sched_fence
*amd_sched_fence_create(struct amd_sched_entity
*s_entity
, void *owner
)
32 struct amd_sched_fence
*fence
= NULL
;
35 fence
= kmem_cache_zalloc(sched_fence_slab
, GFP_KERNEL
);
39 INIT_LIST_HEAD(&fence
->scheduled_cb
);
41 fence
->sched
= s_entity
->sched
;
42 spin_lock_init(&fence
->lock
);
44 seq
= atomic_inc_return(&s_entity
->fence_seq
);
45 fence_init(&fence
->base
, &amd_sched_fence_ops
, &fence
->lock
,
46 s_entity
->fence_context
, seq
);
51 void amd_sched_fence_signal(struct amd_sched_fence
*fence
)
53 int ret
= fence_signal(&fence
->base
);
55 FENCE_TRACE(&fence
->base
, "signaled from irq context\n");
57 FENCE_TRACE(&fence
->base
, "was already signaled\n");
60 void amd_sched_job_pre_schedule(struct amd_gpu_scheduler
*sched
,
61 struct amd_sched_job
*s_job
)
64 spin_lock_irqsave(&sched
->job_list_lock
, flags
);
65 list_add_tail(&s_job
->node
, &sched
->ring_mirror_list
);
66 sched
->ops
->begin_job(s_job
);
67 spin_unlock_irqrestore(&sched
->job_list_lock
, flags
);
70 void amd_sched_fence_scheduled(struct amd_sched_fence
*s_fence
)
72 struct fence_cb
*cur
, *tmp
;
74 set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT
, &s_fence
->base
.flags
);
75 list_for_each_entry_safe(cur
, tmp
, &s_fence
->scheduled_cb
, node
) {
76 list_del_init(&cur
->node
);
77 cur
->func(&s_fence
->base
, cur
);
/* fence_ops.get_driver_name: constant driver identifier for this fence. */
static const char *amd_sched_fence_get_driver_name(struct fence *fence)
{
	/* NOTE(review): the body was lost in the damaged source; upstream
	 * returns the scheduler's fixed driver name — confirm the string. */
	return "amd_sched";
}
86 static const char *amd_sched_fence_get_timeline_name(struct fence
*f
)
88 struct amd_sched_fence
*fence
= to_amd_sched_fence(f
);
89 return (const char *)fence
->sched
->name
;
92 static bool amd_sched_fence_enable_signaling(struct fence
*f
)
98 * amd_sched_fence_free - free up the fence memory
100 * @rcu: RCU callback head
102 * Free up the fence memory after the RCU grace period.
104 static void amd_sched_fence_free(struct rcu_head
*rcu
)
106 struct fence
*f
= container_of(rcu
, struct fence
, rcu
);
107 struct amd_sched_fence
*fence
= to_amd_sched_fence(f
);
108 kmem_cache_free(sched_fence_slab
, fence
);
112 * amd_sched_fence_release - callback that fence can be freed
116 * This function is called when the reference count becomes zero.
117 * It just RCU schedules freeing up the fence.
119 static void amd_sched_fence_release(struct fence
*f
)
121 call_rcu(&f
->rcu
, amd_sched_fence_free
);
124 const struct fence_ops amd_sched_fence_ops
= {
125 .get_driver_name
= amd_sched_fence_get_driver_name
,
126 .get_timeline_name
= amd_sched_fence_get_timeline_name
,
127 .enable_signaling
= amd_sched_fence_enable_signaling
,
129 .wait
= fence_default_wait
,
130 .release
= amd_sched_fence_release
,