/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "timvf_worker.h"

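/*
 * Validate an rte_event_timer before it is armed: it must still be in the
 * NOT_ARMED state and its timeout must be non-zero and smaller than the
 * number of buckets in the ring. On failure the timer state and rte_errno
 * are updated and a negative value is returned.
 */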
static inline int
timvf_timer_reg_checks(const struct timvf_ring * const timr,
                struct rte_event_timer * const tim)
{
        if (unlikely(tim->state)) {
                tim->state = RTE_EVENT_TIMER_ERROR;
                rte_errno = EALREADY;
                goto fail;
        }

        if (unlikely(!tim->timeout_ticks ||
                        tim->timeout_ticks >= timr->nb_bkts)) {
                tim->state = tim->timeout_ticks ? RTE_EVENT_TIMER_ERROR_TOOLATE
                        : RTE_EVENT_TIMER_ERROR_TOOEARLY;
                rte_errno = EINVAL;
                goto fail;
        }

        return 0;
fail:
        return -EINVAL;
}

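/*
 * Convert the rte_event carried by the timer into a TIM memory entry:
 * w0 receives the re-packed event metadata word and wqe the 64-bit event
 * payload (ev.u64).
 */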
static inline void
timvf_format_event(const struct rte_event_timer * const tim,
                struct tim_mem_entry * const entry)
{
        entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
                (tim->ev.event & 0xFFFFFFFFF);
        entry->wqe = tim->ev.u64;
}

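/*
 * Cancel up to nb_timers previously armed timers. The loop stops at the
 * first timer that is already canceled, not armed, or that cannot be
 * removed from the ring, setting rte_errno accordingly; the return value
 * is the number of timers canceled.
 */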
uint16_t
timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
                struct rte_event_timer **tim, const uint16_t nb_timers)
{
        RTE_SET_USED(adptr);
        int ret;
        uint16_t index;

        for (index = 0; index < nb_timers; index++) {
                if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) {
                        rte_errno = EALREADY;
                        break;
                }

                if (tim[index]->state != RTE_EVENT_TIMER_ARMED) {
                        rte_errno = EINVAL;
                        break;
                }
                ret = timvf_rem_entry(tim[index]);
                if (ret) {
                        rte_errno = -ret;
                        break;
                }
        }
        return index;
}

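/*
 * Arm a burst of timers on a ring operating in single-producer mode: each
 * timer is validated, formatted into a TIM entry and queued through
 * timvf_add_entry_sp(). Returns the number of timers successfully armed.
 */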
uint16_t
timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr,
                struct rte_event_timer **tim, const uint16_t nb_timers)
{
        int ret;
        uint16_t index;
        struct tim_mem_entry entry;
        struct timvf_ring *timr = adptr->data->adapter_priv;

        for (index = 0; index < nb_timers; index++) {
                if (timvf_timer_reg_checks(timr, tim[index]))
                        break;

                timvf_format_event(tim[index], &entry);
                ret = timvf_add_entry_sp(timr, tim[index]->timeout_ticks,
                                tim[index], &entry);
                if (unlikely(ret)) {
                        rte_errno = -ret;
                        break;
                }
        }

        return index;
}

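/*
 * Statistics-enabled variant of the single-producer arm path: identical
 * behaviour, with the ring's armed-timer counter updated as well.
 */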
uint16_t
timvf_timer_arm_burst_sp_stats(const struct rte_event_timer_adapter *adptr,
                struct rte_event_timer **tim, const uint16_t nb_timers)
{
        uint16_t ret;
        struct timvf_ring *timr = adptr->data->adapter_priv;

        ret = timvf_timer_arm_burst_sp(adptr, tim, nb_timers);
        timr->tim_arm_cnt += ret;

        return ret;
}

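/*
 * Arm a burst of timers on a ring shared by multiple producer lcores;
 * same flow as the single-producer path but entries are queued through
 * the multi-producer-safe timvf_add_entry_mp().
 */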
uint16_t
timvf_timer_arm_burst_mp(const struct rte_event_timer_adapter *adptr,
                struct rte_event_timer **tim, const uint16_t nb_timers)
{
        int ret;
        uint16_t index;
        struct tim_mem_entry entry;
        struct timvf_ring *timr = adptr->data->adapter_priv;

        for (index = 0; index < nb_timers; index++) {
                if (timvf_timer_reg_checks(timr, tim[index]))
                        break;
                timvf_format_event(tim[index], &entry);
                ret = timvf_add_entry_mp(timr, tim[index]->timeout_ticks,
                                tim[index], &entry);
                if (unlikely(ret)) {
                        rte_errno = -ret;
                        break;
                }
        }

        return index;
}

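/*
 * Statistics-enabled variant of the multi-producer arm path.
 */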
uint16_t
timvf_timer_arm_burst_mp_stats(const struct rte_event_timer_adapter *adptr,
                struct rte_event_timer **tim, const uint16_t nb_timers)
{
        uint16_t ret;
        struct timvf_ring *timr = adptr->data->adapter_priv;

        ret = timvf_timer_arm_burst_mp(adptr, tim, nb_timers);
        timr->tim_arm_cnt += ret;

        return ret;
}

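/*
 * Arm a burst of timers that all expire after the same timeout. The common
 * timeout is validated once, the events are staged into a cache-aligned
 * scratch array in chunks of at most TIMVF_MAX_BURST entries, and each
 * chunk is submitted with timvf_add_entry_brst(). Returns the number of
 * timers successfully armed.
 */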
uint16_t
timvf_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
                struct rte_event_timer **tim, const uint64_t timeout_tick,
                const uint16_t nb_timers)
{
        int ret;
        uint16_t set_timers = 0;
        uint16_t idx;
        uint16_t arr_idx = 0;
        struct timvf_ring *timr = adptr->data->adapter_priv;
        struct tim_mem_entry entry[TIMVF_MAX_BURST] __rte_cache_aligned;

        if (unlikely(!timeout_tick || timeout_tick >= timr->nb_bkts)) {
                const enum rte_event_timer_state state = timeout_tick ?
                        RTE_EVENT_TIMER_ERROR_TOOLATE :
                        RTE_EVENT_TIMER_ERROR_TOOEARLY;
                for (idx = 0; idx < nb_timers; idx++)
                        tim[idx]->state = state;
                rte_errno = EINVAL;
                return 0;
        }

        while (arr_idx < nb_timers) {
                for (idx = 0; idx < TIMVF_MAX_BURST && (arr_idx < nb_timers);
                                idx++, arr_idx++) {
                        timvf_format_event(tim[arr_idx], &entry[idx]);
                }
                ret = timvf_add_entry_brst(timr, timeout_tick, &tim[set_timers],
                                entry, idx);
                set_timers += ret;
                if (ret != idx)
                        break;
        }

        return set_timers;
}

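/*
 * Statistics-enabled variant of the common-timeout arm path.
 */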
uint16_t
timvf_timer_arm_tmo_brst_stats(const struct rte_event_timer_adapter *adptr,
                struct rte_event_timer **tim, const uint64_t timeout_tick,
                const uint16_t nb_timers)
{
        uint16_t set_timers;
        struct timvf_ring *timr = adptr->data->adapter_priv;

        set_timers = timvf_timer_arm_tmo_brst(adptr, tim, timeout_tick,
                        nb_timers);
        timr->tim_arm_cnt += set_timers;

        return set_timers;
}

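/*
 * Select how the ring replenishes its chunk memory: from the OCTEON TX
 * FPA pool when use_fpa is set, otherwise through the generic mempool
 * based refill path.
 */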
void
timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa)
{
        if (use_fpa)
                timr->refill_chunk = timvf_refill_chunk_fpa;
        else
                timr->refill_chunk = timvf_refill_chunk_generic;
}
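
/*
 * Illustrative usage sketch (not part of the driver): an application arms
 * timers through the generic rte_event_timer_adapter API, which dispatches
 * to one of the burst handlers above depending on how the adapter was
 * configured. The adapter `adptr` is assumed to have been created with
 * rte_event_timer_adapter_create() beforehand; the event field values are
 * examples only.
 *
 *      struct rte_event_timer tim = {
 *              .ev.op = RTE_EVENT_OP_NEW,
 *              .ev.queue_id = 0,
 *              .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *              .ev.event_type = RTE_EVENT_TYPE_TIMER,
 *              .state = RTE_EVENT_TIMER_NOT_ARMED,
 *              .timeout_ticks = 30,
 *      };
 *      struct rte_event_timer *tim_p = &tim;
 *
 *      if (rte_event_timer_arm_burst(adptr, &tim_p, 1) != 1)
 *              printf("arm failed: %d\n", rte_errno);
 */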