BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK,
+ BPF_MAP_TYPE_SK_STORAGE,
};
/* Note that tracing related programs such as
BPF_PROG_TYPE_SK_REUSEPORT,
BPF_PROG_TYPE_FLOW_DISSECTOR,
BPF_PROG_TYPE_CGROUP_SYSCTL,
+ BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
};
enum bpf_attach_type {
* was provided.
*
* **-ERANGE** if resulting value was out of range.
+ *
+ * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
+ * Description
+ * Get a bpf-local-storage from *sk*.
+ *
+ * Logically, it can be thought of as getting the value from
+ * a *map* with *sk* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem(map, &sk)**, except that this
+ * helper enforces that the key must be a full socket
+ * (**bpf_sk_fullsock()**) and that the map must be of type
+ * **BPF_MAP_TYPE_SK_STORAGE**.
+ *
+ * Underneath, the value is stored locally at *sk* instead of
+ * in the *map*. The *map* is used as the bpf-local-storage
+ * **type**. The bpf-local-storage **type** (i.e. the *map*) is
+ * searched against all bpf-local-storages residing at *sk*.
+ *
+ * The optional *flags* value **BPF_SK_STORAGE_GET_F_CREATE**
+ * can be used so that a new bpf-local-storage is created if
+ * one does not exist. *value* can be used together with
+ * **BPF_SK_STORAGE_GET_F_CREATE** to specify the initial value
+ * of the new bpf-local-storage. If *value* is **NULL**, the new
+ * bpf-local-storage is zero initialized.
+ * Return
+ * A bpf-local-storage pointer is returned on success.
+ *
+ * **NULL** if not found, or if an error occurred while adding
+ * a new bpf-local-storage.
+ *
+ * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
+ * Description
+ * Delete a bpf-local-storage from *sk*.
+ * Return
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf-local-storage cannot be found.
*/
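
For illustration, here is a minimal sketch of a BPF program exercising both new helpers. It assumes libbpf's BTF-style map definitions and a cgroup_skb attachment; the map name (sk_stg_map), the value layout, and the counter logic are hypothetical and only meant to show the calling convention:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* hypothetical per-socket value */
struct sk_stg {
	__u64 pkt_out_cnt;
};

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);	/* required for sk storage maps */
	__type(key, int);
	__type(value, struct sk_stg);
} sk_stg_map SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress_pkts(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	struct sk_stg *stg;

	if (!sk)
		return 1;

	/* the key must be a full socket */
	sk = bpf_sk_fullsock(sk);
	if (!sk)
		return 1;

	/* create on first use; value == NULL (0), so it starts zeroed */
	stg = bpf_sk_storage_get(&sk_stg_map, sk, 0,
				 BPF_SK_STORAGE_GET_F_CREATE);
	if (!stg)
		return 1;

	stg->pkt_out_cnt++;

	/* purely illustrative explicit removal when the counter wraps */
	if (stg->pkt_out_cnt == 0)
		bpf_sk_storage_delete(&sk_stg_map, sk);

	return 1;	/* allow the packet */
}

char _license[] SEC("license") = "GPL";

Because the value lives on the socket itself, it is released automatically when the socket is destroyed; bpf_sk_storage_delete() is only needed for explicit removal, so no user-space cleanup of stale map entries is required.
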
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
FN(sysctl_get_new_value), \
FN(sysctl_set_new_value), \
FN(strtol), \
- FN(strtoul),
+ FN(strtoul), \
+ FN(sk_storage_get), \
+ FN(sk_storage_delete),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
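
As a point of reference, the two new FN() entries become BPF_FUNC_sk_storage_get and BPF_FUNC_sk_storage_delete through the enum expansion that follows this comment in bpf.h; sketched roughly, it looks like this:

#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
enum bpf_func_id {
	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
	__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN

The resulting integer is what a loader places in the 'imm' field of the BPF_CALL instruction.
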
/* BPF_FUNC_sysctl_get_name flags. */
#define BPF_F_SYSCTL_BASE_NAME (1ULL << 0)
+/* BPF_FUNC_sk_storage_get flags */
+#define BPF_SK_STORAGE_GET_F_CREATE (1ULL << 0)
+
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
+/* The format for the admin sched (dump only):
+ * [TCA_TAPRIO_ATTR_ADMIN_SCHED]
+ *   [TCA_TAPRIO_ATTR_SCHED_BASE_TIME]
+ *   [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]
+ *     [TCA_TAPRIO_ATTR_SCHED_ENTRY]
+ *       [TCA_TAPRIO_ATTR_SCHED_ENTRY_CMD]
+ *       [TCA_TAPRIO_ATTR_SCHED_ENTRY_GATES]
+ *       [TCA_TAPRIO_ATTR_SCHED_ENTRY_INTERVAL]
+ */
+
enum {
TCA_TAPRIO_ATTR_UNSPEC,
TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
TCA_TAPRIO_PAD,
+ TCA_TAPRIO_ATTR_ADMIN_SCHED, /* The admin sched, only used in dump */
+ TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME, /* s64 */
+ TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, /* s64 */
__TCA_TAPRIO_ATTR_MAX,
};
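
For illustration, a user-space sketch of walking the new TCA_TAPRIO_ATTR_ADMIN_SCHED nest from a taprio dump. It assumes libmnl and that the caller has already located the attribute inside the qdisc's TCA_OPTIONS payload; the function name and printed labels are illustrative only:

#include <stdio.h>
#include <libmnl/libmnl.h>
#include <linux/pkt_sched.h>

static void print_admin_sched(const struct nlattr *admin)
{
	const struct nlattr *attr;

	mnl_attr_for_each_nested(attr, admin) {
		switch (mnl_attr_get_type(attr)) {
		case TCA_TAPRIO_ATTR_SCHED_BASE_TIME:
			printf("admin base-time %lld\n",
			       (long long)mnl_attr_get_u64(attr));
			break;
		case TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME:
			printf("admin cycle-time %lld\n",
			       (long long)mnl_attr_get_u64(attr));
			break;
		case TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION:
			printf("admin cycle-time-extension %lld\n",
			       (long long)mnl_attr_get_u64(attr));
			break;
		case TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST:
			/* nested schedule entries (cmd, gates, interval);
			 * walking them is omitted here */
			break;
		default:
			break;
		}
	}
}

The entry list itself follows the schedule-entry nesting documented earlier in this header.
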