/* SPDX-License-Identifier: LGPL-2.1+ */
#include <sys/timerfd.h>
/* The kinds of event sources this event loop knows. Only the enumerators
 * visible in this chunk are listed; the fused original line numbers (15..30)
 * show that several earlier enumerators were dropped by the extraction —
 * confirm the full list against the original file. */
typedef enum EventSourceType {
        /* NOTE(review): enumerators before SOURCE_TIME_MONOTONIC (original
         * lines 16-18) are elided in this view, so the absolute integer
         * values here differ from the original's. */
        SOURCE_TIME_MONOTONIC,
        SOURCE_TIME_REALTIME_ALARM,
        SOURCE_TIME_BOOTTIME_ALARM,
        /* NOTE(review): enumerators between original lines 22 and 28 elided. */
        _SOURCE_EVENT_SOURCE_TYPE_MAX,
        _SOURCE_EVENT_SOURCE_TYPE_INVALID = -1,
} EventSourceType;  /* closing line reconstructed; it was dropped by the extraction */
/* All objects we use in epoll events start with this value, so that
 * we know how to dispatch it */
typedef enum WakeupType {
        /* NOTE(review): the concrete WAKEUP_* enumerators (original lines
         * 36-41 per the fused numbering) were dropped by the extraction;
         * only the invalid marker survives in this view. */
        _WAKEUP_TYPE_INVALID = -1,
} WakeupType;  /* closing line reconstructed; it was dropped by the extraction */
47 struct sd_event_source
{
54 sd_event_handler_t prepare
;
58 EventSourceType type
:5;
65 unsigned pending_index
;
66 unsigned prepare_index
;
67 uint64_t pending_iteration
;
68 uint64_t prepare_iteration
;
70 sd_event_destroy_t destroy_callback
;
72 LIST_FIELDS(sd_event_source
, sources
);
76 sd_event_io_handler_t callback
;
84 sd_event_time_handler_t callback
;
85 usec_t next
, accuracy
;
86 unsigned earliest_index
;
87 unsigned latest_index
;
90 sd_event_signal_handler_t callback
;
91 struct signalfd_siginfo siginfo
;
95 sd_event_child_handler_t callback
;
101 sd_event_handler_t callback
;
104 sd_event_handler_t callback
;
107 sd_event_handler_t callback
;
108 unsigned prioq_index
;
111 sd_event_inotify_handler_t callback
;
113 struct inode_data
*inode_data
;
114 LIST_FIELDS(sd_event_source
, by_inode_data
);
/* For all clocks we maintain two priority queues each, one
 * ordered for the earliest times the events may be
 * dispatched, and one ordered by the latest times they must
 * have been dispatched. The range between the top entries in
 * the two prioqs is the time window we can freely schedule
 * wakeups in. (NOTE: comment truncated by extraction; the enclosing
 * declaration it belongs to was dropped.) */
/* For each priority we maintain one signal fd, so that we
 * only have to dequeue a single event per priority at a
 * time. (NOTE: comment truncated by extraction; the enclosing
 * declaration it belongs to was dropped.) */
147 sd_event_source
*current
;
150 /* A structure listing all event sources currently watching a specific inode */
152 /* The identifier for the inode, the combination of the .st_dev + .st_ino fields of the file */
156 /* An fd of the inode to watch. The fd is kept open until the next iteration of the loop, so that we can
157 * rearrange the priority still until then, as we need the original inode to change the priority as we need to
158 * add a watch descriptor to the right inotify for the priority which we can only do if we have a handle to the
159 * original inode. We keep a list of all inode_data objects with an open fd in the to_close list (see below) of
160 * the sd-event object, so that it is efficient to close everything, before entering the next event loop
164 /* The inotify "watch descriptor" */
167 /* The combination of the mask of all inotify watches on this inode we manage. This is also the mask that has
168 * most recently been set on the watch descriptor. */
169 uint32_t combined_mask
;
171 /* All event sources subscribed to this inode */
172 LIST_HEAD(sd_event_source
, event_sources
);
174 /* The inotify object we watch this inode with */
175 struct inotify_data
*inotify_data
;
177 /* A linked list of all inode data objects with fds to close (see above) */
178 LIST_FIELDS(struct inode_data
, to_close
);
181 /* A structure encapsulating an inotify fd */
182 struct inotify_data
{
185 /* For each priority we maintain one inotify fd, so that we only have to dequeue a single event per priority at
191 Hashmap
*inodes
; /* The inode_data structures keyed by dev+ino */
192 Hashmap
*wd
; /* The inode_data structures keyed by the watch descriptor for each */
194 /* The buffer we read inotify events into */
195 union inotify_event_buffer buffer
;
196 size_t buffer_filled
; /* fill level of the buffer */
198 /* How many event sources are currently marked pending for this inotify. We won't read new events off the
199 * inotify fd as long as there are still pending events on the inotify (because we have no strategy of queuing
200 * the events locally if they can't be coalesced). */
203 /* A linked list of all inotify objects with data already read, that still need processing. We keep this list
204 * to make it efficient to figure out what inotify objects to process data on next. */
205 LIST_FIELDS(struct inotify_data
, buffered
);