/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* rte_cuckoo_hash_x86.h
 * This file holds all x86-specific Cuckoo Hash functions.
 */

/* Try to insert the new entry into one bucket only (@prim_bkt), without
 * pushing existing entries out to their alternative buckets.
 */
static inline unsigned
rte_hash_cuckoo_insert_mw_tm(struct rte_hash_bucket *prim_bkt,
		hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx)
{
	unsigned i, status;
	unsigned try = 0;

	while (try < RTE_HASH_TSX_MAX_RETRY) {
		status = rte_xbegin();
		if (likely(status == RTE_XBEGIN_STARTED)) {
			/* Insert new entry if there is room in the primary
			 * bucket.
			 */
			for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
				/* Check if slot is available */
				if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
					prim_bkt->sig_current[i] = sig;
					prim_bkt->sig_alt[i] = alt_hash;
					prim_bkt->key_idx[i] = new_idx;
					break;
				}
			}
			rte_xend();

			if (i != RTE_HASH_BUCKET_ENTRIES)
				return 0;

			break; /* break off try loop if transaction commits */
		} else {
			/* If we abort we give up this cuckoo path. */
			try++;
			rte_pause();
		}
	}

	return -1;
}
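
/*
 * Illustration only, not part of the library: the bounded-retry RTM idiom
 * shared by the helpers in this file. example_tsx_retry() and its callback
 * are hypothetical names; the real helpers inline the critical section
 * directly between rte_xbegin() and rte_xend(), as above.
 */
static inline int
example_tsx_retry(void (*critical_section)(void *arg), void *arg)
{
	unsigned try = 0;

	while (try < RTE_HASH_TSX_MAX_RETRY) {
		if (rte_xbegin() == RTE_XBEGIN_STARTED) {
			critical_section(arg);	/* runs transactionally */
			rte_xend();		/* commit the transaction */
			return 0;
		}
		/* Transaction aborted (conflict, capacity, ...): back off
		 * and retry a bounded number of times.
		 */
		try++;
		rte_pause();
	}

	return -1;
}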

/* Shift entries along the provided cuckoo path (ending at @leaf, slot
 * @leaf_slot) and fill the freed slot at the head of the path with the new
 * entry (sig, alt_hash, new_idx).
 */
static inline int
rte_hash_cuckoo_move_insert_mw_tm(const struct rte_hash *h,
		struct queue_node *leaf, uint32_t leaf_slot,
		hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx)
{
	unsigned try = 0;
	unsigned status;
	uint32_t prev_alt_bkt_idx;

	struct queue_node *prev_node, *curr_node = leaf;
	struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
	uint32_t prev_slot, curr_slot = leaf_slot;

	while (try < RTE_HASH_TSX_MAX_RETRY) {
		status = rte_xbegin();
		if (likely(status == RTE_XBEGIN_STARTED)) {
			while (likely(curr_node->prev != NULL)) {
				prev_node = curr_node->prev;
				prev_bkt = prev_node->bkt;
				prev_slot = curr_node->prev_slot;

				prev_alt_bkt_idx
					= prev_bkt->sig_alt[prev_slot]
						& h->bucket_bitmask;

				/* The recorded path is stale (the entry's
				 * alternative bucket changed): abort the
				 * transaction and retry.
				 */
				if (unlikely(&h->buckets[prev_alt_bkt_idx]
						!= curr_bkt)) {
					rte_xabort(RTE_XABORT_CUCKOO_PATH_INVALIDED);
				}

				/* Need to swap current/alt sig to allow later
				 * Cuckoo insert to move elements back to their
				 * primary bucket if available.
				 */
				curr_bkt->sig_alt[curr_slot] =
					prev_bkt->sig_current[prev_slot];
				curr_bkt->sig_current[curr_slot] =
					prev_bkt->sig_alt[prev_slot];
				curr_bkt->key_idx[curr_slot]
					= prev_bkt->key_idx[prev_slot];

				curr_slot = prev_slot;
				curr_node = prev_node;
				curr_bkt = curr_node->bkt;
			}

			curr_bkt->sig_current[curr_slot] = sig;
			curr_bkt->sig_alt[curr_slot] = alt_hash;
			curr_bkt->key_idx[curr_slot] = new_idx;

			rte_xend();

			return 0;
		}

		/* If we abort, we give up this cuckoo path: it is most likely
		 * no longer valid, since TSX detected a data conflict.
		 */
		try++;
		rte_pause();
	}

	return -1;
}
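
/*
 * Note added for clarity: the cuckoo path handed to
 * rte_hash_cuckoo_move_insert_mw_tm() is a backwards-linked chain of
 * queue_node entries built by the BFS below. Each node records the bucket
 * it represents (bkt), the node it was reached from (prev), and the slot in
 * that previous bucket whose alternative bucket is this one (prev_slot).
 * Walking the prev pointers from the empty leaf back to the root yields the
 * sequence of entry moves performed above, which frees a slot in the
 * original target bucket.
 */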

/*
 * Make space for a new key, using a breadth-first (BFS) cuckoo search and
 * the multi-writer-safe cuckoo move above.
 */
static inline int
rte_hash_cuckoo_make_space_mw_tm(const struct rte_hash *h,
			struct rte_hash_bucket *bkt,
			hash_sig_t sig, hash_sig_t alt_hash,
			uint32_t new_idx)
{
	unsigned i;
	struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
	struct queue_node *tail, *head;
	struct rte_hash_bucket *curr_bkt, *alt_bkt;

	tail = queue;
	head = queue + 1;
	tail->bkt = bkt;
	tail->prev = NULL;
	tail->prev_slot = -1;

	/* Cuckoo BFS search */
	while (likely(tail != head && head <
			queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
			RTE_HASH_BUCKET_ENTRIES)) {
		curr_bkt = tail->bkt;
		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
				if (likely(rte_hash_cuckoo_move_insert_mw_tm(h,
						tail, i, sig,
						alt_hash, new_idx) == 0))
					return 0;
			}

			/* Enqueue new node and keep prev node info */
			alt_bkt = &(h->buckets[curr_bkt->sig_alt[i]
						& h->bucket_bitmask]);
			head->bkt = alt_bkt;
			head->prev = tail;
			head->prev_slot = i;
			head++;
		}
		tail++;
	}

	return -ENOSPC;
}
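
/*
 * Illustrative sketch, not part of the library: how a multi-writer add path
 * can combine the helpers above. The real caller lives in rte_cuckoo_hash.c;
 * example_add_tsx() and its exact error handling are hypothetical.
 */
static inline int
example_add_tsx(const struct rte_hash *h, struct rte_hash_bucket *prim_bkt,
		hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx)
{
	/* Fast path: claim a free slot in the primary bucket transactionally */
	if (rte_hash_cuckoo_insert_mw_tm(prim_bkt, sig, alt_hash, new_idx) == 0)
		return 0;

	/* Slow path: BFS for an empty slot and shift entries along the
	 * resulting cuckoo path to free a slot in the primary bucket.
	 */
	if (rte_hash_cuckoo_make_space_mw_tm(h, prim_bkt, sig,
						alt_hash, new_idx) == 0)
		return 0;

	/* No free slot reachable within the BFS queue budget */
	return -ENOSPC;
}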