/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *	Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

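/*
 * On ia64 there is no <asm/uv/uv_irq.h>, so mirror the layout of the x86
 * IO-APIC route entry here; xpc_create_gru_mq_uv() overlays this struct on
 * mq->mmr_value to pull out the interrupt vector and destination fields.
 */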
#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		__reserved_2	: 15,
		dest		: 32;
};
#endif

static struct xpc_heartbeat_uv *xpc_heartbeat_uv;

#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"

static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;

static int
xpc_setup_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}

static void
xpc_teardown_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}

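/*
 * Bind a GRU message queue to an interrupt.  On x86_64 the BIOS hands us a
 * vector via uv_setup_irq(); on ia64 one of the two well-known SAL vectors
 * (SGI_XPC_ACTIVATE or SGI_XPC_NOTIFY), selected by the queue's irq_name,
 * is written into the watchlist MMR directly.
 */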
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			       UV_AFFINITY_CPU);
	if (mq->irq < 0) {
		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
			-mq->irq);
		return mq->irq;
	}

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
	#error not a supported configuration
#endif

	return 0;
}

static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
	#error not a supported configuration
#endif
}

static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#elif defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
	#error not a supported configuration
#endif
}

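/*
 * Create a GRU message queue: allocate the descriptor and page-aligned
 * backing store on the target cpu's node, arm the watchlist so hardware
 * raises an irq on message arrival, wire up the irq handler, and finally
 * open the queue's memory to the other partitions.  The out_6..out_0
 * labels unwind those steps in exact reverse order on failure.
 */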
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = alloc_pages_exact_node(nid,
				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				      pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nid, mmr_value->vector, mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}

static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}

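/*
 * Send a message into a GRU mq.  MQE_QUEUE_FULL and MQE_CONGESTION are
 * treated as transient and retried indefinitely (with a short sleep in the
 * full-queue case); any other GRU status is fatal and mapped to
 * xpGruSendMqError.
 */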
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}

static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}

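/*
 * Decode one message pulled off the activate mq and fan it out: activation
 * state changes are queued for the heartbeat checker (via act_state_req and
 * *wakeup_hb_checker), while channel-control messages just set the relevant
 * chctl flag and poke the channel manager.
 */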
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? below?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_opencomplete_uv,
				   hdr);
		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}

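/*
 * Interrupt handler for the activate mq: drain every pending message,
 * dispatch it to xpc_handle_activate_mq_msg_uv(), and wake the heartbeat
 * checker once at the end if any message asked for it.
 */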
static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}

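/*
 * Sending an activate message requires a local copy of the remote
 * partition's gru_message_queue_desc.  The copy is cached under
 * cached_activate_gru_mq_desc_mutex; if a send fails and the cached-desc
 * flag was cleared in the meantime (the remote side moved its mq), the
 * "again" path below refetches the descriptor and retries.
 */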
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
	#error not a supported configuration
#endif

	return ret;
}

static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}

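/*
 * On UV the heartbeat is pulled, not pushed: each partition bumps a counter
 * in its own cached_heartbeat, and remote partitions read it with
 * xp_remote_memcpy() in xpc_get_remote_heartbeat_uv().  There is therefore
 * no per-partition enable/disable state to track, and the allow/disallow
 * hooks below are intentionally empty.
 */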
static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}

static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {

		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}
	return ret;
}

868 | ||
869 | static void | |
a47d5dac | 870 | xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp, |
5b8669df | 871 | unsigned long remote_rp_gpa, int nasid) |
33ba3c77 DN |
872 | { |
873 | short partid = remote_rp->SAL_partid; | |
874 | struct xpc_partition *part = &xpc_partitions[partid]; | |
5b8669df | 875 | struct xpc_activate_mq_msg_activate_req_uv msg; |
33ba3c77 | 876 | |
5b8669df DN |
877 | part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */ |
878 | part->remote_rp_ts_jiffies = remote_rp->ts_jiffies; | |
a374c57b | 879 | part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa; |
6f2584f4 | 880 | part->sn.uv.activate_gru_mq_desc_gpa = |
a374c57b | 881 | remote_rp->sn.uv.activate_gru_mq_desc_gpa; |
5b8669df DN |
882 | |
883 | /* | |
884 | * ??? Is it a good idea to make this conditional on what is | |
885 | * ??? potentially stale state information? | |
886 | */ | |
887 | if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) { | |
888 | msg.rp_gpa = uv_gpa(xpc_rsvd_page); | |
a374c57b | 889 | msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa; |
6f2584f4 | 890 | msg.activate_gru_mq_desc_gpa = |
a374c57b | 891 | xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa; |
5b8669df DN |
892 | xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), |
893 | XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV); | |
894 | } | |
33ba3c77 | 895 | |
5b8669df DN |
896 | if (part->act_state == XPC_P_AS_INACTIVE) |
897 | xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV); | |
33ba3c77 DN |
898 | } |
899 | ||
a47d5dac DN |
900 | static void |
901 | xpc_request_partition_reactivation_uv(struct xpc_partition *part) | |
902 | { | |
5b8669df DN |
903 | xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV); |
904 | } | |
905 | ||
906 | static void | |
907 | xpc_request_partition_deactivation_uv(struct xpc_partition *part) | |
908 | { | |
909 | struct xpc_activate_mq_msg_deactivate_req_uv msg; | |
910 | ||
911 | /* | |
912 | * ??? Is it a good idea to make this conditional on what is | |
913 | * ??? potentially stale state information? | |
914 | */ | |
915 | if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING && | |
916 | part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) { | |
917 | ||
918 | msg.reason = part->reason; | |
919 | xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), | |
920 | XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV); | |
921 | } | |
a47d5dac DN |
922 | } |
923 | ||
static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

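/*
 * Minimal singly-linked FIFO used for the per-channel free-msg-slot and
 * deliverable-msg lists.  A spinlock protects first/last/n_entries; entries
 * embed a struct xpc_fifo_entry_uv and are recovered with container_of().
 */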
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}

/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
		 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}

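/*
 * Allocate the send-side msg slots for a channel.  If the full
 * local_nentries array cannot be kzalloc'd, retry with progressively fewer
 * entries and shrink ch->local_nentries to whatever finally fit; every slot
 * starts out on the channel's msg_slot_free_list.
 */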
static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

1091 | ||
1092 | static enum xp_retval | |
1093 | xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch) | |
1094 | { | |
1095 | struct xpc_channel_uv *ch_uv = &ch->sn.uv; | |
1096 | struct xpc_notify_mq_msg_uv *msg_slot; | |
1097 | unsigned long irq_flags; | |
1098 | int nentries; | |
1099 | int entry; | |
1100 | size_t nbytes; | |
1101 | ||
1102 | for (nentries = ch->remote_nentries; nentries > 0; nentries--) { | |
1103 | nbytes = nentries * ch->entry_size; | |
1104 | ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL); | |
1105 | if (ch_uv->recv_msg_slots == NULL) | |
1106 | continue; | |
1107 | ||
1108 | for (entry = 0; entry < nentries; entry++) { | |
361916a9 DN |
1109 | msg_slot = ch_uv->recv_msg_slots + |
1110 | entry * ch->entry_size; | |
bd3e64c1 DN |
1111 | |
1112 | msg_slot->hdr.msg_slot_number = entry; | |
1113 | } | |
1114 | ||
1115 | spin_lock_irqsave(&ch->lock, irq_flags); | |
1116 | if (nentries < ch->remote_nentries) | |
1117 | ch->remote_nentries = nentries; | |
1118 | spin_unlock_irqrestore(&ch->lock, irq_flags); | |
1119 | return xpSuccess; | |
1120 | } | |
1121 | ||
1122 | return xpNoMemory; | |
1123 | } | |
1124 | ||
/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}

/*
 * Free up msg_slots and clear other stuff that was set up for the
 * specified channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}

static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}

static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}

static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}

static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}

static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}

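/*
 * An ack arrives as a notify mq message with hdr.size == 0.  The slot index
 * is recovered modulo local_nentries, and msg_slot_number is advanced by
 * local_nentries so the same slot carries a new, unique number the next
 * time it is used; the cmpxchg() in xpc_notify_sender_uv() ensures the
 * sender's callout runs at most once, whether the ack path or the
 * disconnect path gets there first.
 */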
static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}

static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}

static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}

static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}

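/*
 * Queue a payload on the remote partition's notify mq.  The payload is
 * staged in an on-stack buffer behind a xpc_notify_mq_msghdr_uv, tagged
 * with a send-slot number so the eventual zero-size ack can locate the
 * slot, and handed to xpc_send_gru_msg().  A non-NULL func/key pair is
 * published to the slot before the send so a racing disconnect can issue
 * the callout; the cmpxchg() below sorts out who actually delivers it.
 */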
1506 | static enum xp_retval | |
1507 | xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload, | |
1508 | u16 payload_size, u8 notify_type, xpc_notify_func func, | |
1509 | void *key) | |
1510 | { | |
1511 | enum xp_retval ret = xpSuccess; | |
1512 | struct xpc_send_msg_slot_uv *msg_slot = NULL; | |
1513 | struct xpc_notify_mq_msg_uv *msg; | |
1514 | u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV]; | |
1515 | size_t msg_size; | |
1516 | ||
1517 | DBUG_ON(notify_type != XPC_N_CALL); | |
1518 | ||
1519 | msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size; | |
1520 | if (msg_size > ch->entry_size) | |
1521 | return xpPayloadTooBig; | |
1522 | ||
1523 | xpc_msgqueue_ref(ch); | |
1524 | ||
1525 | if (ch->flags & XPC_C_DISCONNECTING) { | |
1526 | ret = ch->reason; | |
1527 | goto out_1; | |
1528 | } | |
1529 | if (!(ch->flags & XPC_C_CONNECTED)) { | |
1530 | ret = xpNotConnected; | |
1531 | goto out_1; | |
1532 | } | |
1533 | ||
1534 | ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot); | |
1535 | if (ret != xpSuccess) | |
1536 | goto out_1; | |
1537 | ||
1538 | if (func != NULL) { | |
1539 | atomic_inc(&ch->n_to_notify); | |
1540 | ||
1541 | msg_slot->key = key; | |
69b3bb65 | 1542 | smp_wmb(); /* a non-NULL func must hit memory after the key */ |
bd3e64c1 DN |
1543 | msg_slot->func = func; |
1544 | ||
1545 | if (ch->flags & XPC_C_DISCONNECTING) { | |
1546 | ret = ch->reason; | |
1547 | goto out_2; | |
1548 | } | |
1549 | } | |
1550 | ||
1551 | msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer; | |
1552 | msg->hdr.partid = xp_partition_id; | |
1553 | msg->hdr.ch_number = ch->number; | |
1554 | msg->hdr.size = msg_size; | |
1555 | msg->hdr.msg_slot_number = msg_slot->msg_slot_number; | |
1556 | memcpy(&msg->payload, payload, payload_size); | |
1557 | ||
6f2584f4 JS |
1558 | ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg, |
1559 | msg_size); | |
bd3e64c1 DN |
1560 | if (ret == xpSuccess) |
1561 | goto out_1; | |
1562 | ||
1563 | XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); | |
1564 | out_2: | |
1565 | if (func != NULL) { | |
1566 | /* | |
1567 | * Try to NULL the msg_slot's func field. If we fail, then | |
1568 | * xpc_notify_senders_of_disconnect_uv() beat us to it, in which | |
1569 | * case we need to pretend we succeeded in sending the message, | 
1570 | * since the user will get a callout for the disconnect error | 
1571 | * from xpc_notify_senders_of_disconnect_uv(), and also getting | 
1572 | * an error returned here would confuse them. Additionally, since | 
1573 | * in this case the channel is being disconnected, we don't need | 
1574 | * to put the msg_slot back on the free list. | 
1575 | */ | |
1576 | if (cmpxchg(&msg_slot->func, func, NULL) != func) { | |
1577 | ret = xpSuccess; | |
1578 | goto out_1; | |
1579 | } | |
1580 | ||
1581 | msg_slot->key = NULL; | |
1582 | atomic_dec(&ch->n_to_notify); | |
1583 | } | |
1584 | xpc_free_msg_slot_uv(ch, msg_slot); | |
1585 | out_1: | |
1586 | xpc_msgqueue_deref(ch); | |
1587 | return ret; | |
1588 | } | |
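/*
 * [Illustrative sketch, not part of the original file.]  How a kernel
 * client might reach this send path through the public wrappers
 * declared in xp.h; it compiles only in-tree, and the channel number
 * and buffer handling below are made up for illustration.  The
 * callback fires once XPC has either delivered the payload or, on
 * disconnect, abandoned it with the disconnect reason.
 */
#include "xp.h"

static void my_send_done(enum xp_retval reason, short partid,
			 int ch_number, void *key)
{
	if (reason == xpMsgDelivered)
		pr_debug("partid %d ch %d: buffer %p reusable\n",
			 partid, ch_number, key);
	else
		pr_debug("ch %d disconnecting, payload status unknown\n",
			 ch_number);
}

static enum xp_retval
send_one_model(short partid, void *buf, u16 len)
{
	/* XPC_WAIT: sleep for a free msg slot rather than fail fast. */
	return xpc_send_notify(partid, 0, XPC_WAIT, buf, len,
			       my_send_done, buf);
}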
1589 | ||
1590 | /* | |
1591 | * Tell the callers of xpc_send_notify() that the status of their payloads | |
1592 | * is unknown because the channel is now disconnecting. | |
1593 | * | |
1594 | * We don't worry about putting these msg_slots on the free list since the | |
1595 | * msg_slots themselves are about to be kfree'd. | |
1596 | */ | |
1597 | static void | |
1598 | xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch) | |
1599 | { | |
1600 | struct xpc_send_msg_slot_uv *msg_slot; | |
1601 | int entry; | |
1602 | ||
1603 | DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING)); | |
1604 | ||
1605 | for (entry = 0; entry < ch->local_nentries; entry++) { | |
1606 | ||
1607 | if (atomic_read(&ch->n_to_notify) == 0) | |
1608 | break; | |
1609 | ||
1610 | msg_slot = &ch->sn.uv.send_msg_slots[entry]; | |
1611 | if (msg_slot->func != NULL) | |
1612 | xpc_notify_sender_uv(ch, msg_slot, ch->reason); | |
1613 | } | |
1614 | } | |
1615 | ||
1616 | /* | |
1617 | * Get the next deliverable message's payload. | |
1618 | */ | |
1619 | static void * | |
1620 | xpc_get_deliverable_payload_uv(struct xpc_channel *ch) | |
1621 | { | |
1622 | struct xpc_fifo_entry_uv *entry; | |
1623 | struct xpc_notify_mq_msg_uv *msg; | |
1624 | void *payload = NULL; | |
1625 | ||
1626 | if (!(ch->flags & XPC_C_DISCONNECTING)) { | |
1627 | entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list); | |
1628 | if (entry != NULL) { | |
1629 | msg = container_of(entry, struct xpc_notify_mq_msg_uv, | |
1630 | hdr.u.next); | |
1631 | payload = &msg->payload; | |
1632 | } | |
1633 | } | |
1634 | return payload; | |
1635 | } | |
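/*
 * [Illustrative sketch, not part of the original file.]  A userspace
 * model of the payload-to-message recovery above: callers only ever
 * see &msg->payload, and container_of() subtracts the member offset
 * to get back to the enclosing message header.  container_of() is
 * redefined here only so the model compiles outside the kernel.
 */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_msg {
	short partid;
	int size;
	unsigned char payload[32];
};

static struct demo_msg *msg_from_payload(void *payload)
{
	return container_of(payload, struct demo_msg, payload);
}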
1636 | ||
1637 | static void | |
1638 | xpc_received_payload_uv(struct xpc_channel *ch, void *payload) | |
e17d416b | 1639 | { |
bd3e64c1 DN |
1640 | struct xpc_notify_mq_msg_uv *msg; |
1641 | enum xp_retval ret; | |
1642 | ||
1643 | msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload); | |
1644 | ||
1645 | /* return an ACK to the sender of this message */ | |
1646 | ||
1647 | msg->hdr.partid = xp_partition_id; | |
1648 | msg->hdr.size = 0; /* size of zero indicates this is an ACK */ | |
1649 | ||
6f2584f4 | 1650 | ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg, |
bd3e64c1 DN |
1651 | sizeof(struct xpc_notify_mq_msghdr_uv)); |
1652 | if (ret != xpSuccess) | |
1653 | XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); | |
e17d416b DN |
1654 | } |
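/*
 * [Illustrative sketch, not part of the original file.]  The ACK
 * convention above in miniature: only the header travels back, and a
 * size of zero is what lets the sender side tell an ACK (release the
 * send slot) apart from a payload-bearing message.  Hypothetical
 * types; the real header is struct xpc_notify_mq_msghdr_uv.
 */
struct ack_hdr_model {
	short partid;
	int size;		/* 0 means "this is an ACK" */
	int msg_slot_number;	/* which send slot to release */
};

static int is_ack_model(const struct ack_hdr_model *h)
{
	return h->size == 0;
}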
1655 | ||
a7665b0a RH |
1656 | static struct xpc_arch_operations xpc_arch_ops_uv = { |
1657 | .setup_partitions = xpc_setup_partitions_uv, | |
1658 | .teardown_partitions = xpc_teardown_partitions_uv, | |
1659 | .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv, | |
1660 | .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv, | |
1661 | .setup_rsvd_page = xpc_setup_rsvd_page_uv, | |
1662 | ||
1663 | .allow_hb = xpc_allow_hb_uv, | |
1664 | .disallow_hb = xpc_disallow_hb_uv, | |
1665 | .disallow_all_hbs = xpc_disallow_all_hbs_uv, | |
1666 | .increment_heartbeat = xpc_increment_heartbeat_uv, | |
1667 | .offline_heartbeat = xpc_offline_heartbeat_uv, | |
1668 | .online_heartbeat = xpc_online_heartbeat_uv, | |
1669 | .heartbeat_init = xpc_heartbeat_init_uv, | |
1670 | .heartbeat_exit = xpc_heartbeat_exit_uv, | |
1671 | .get_remote_heartbeat = xpc_get_remote_heartbeat_uv, | |
1672 | ||
1673 | .request_partition_activation = | |
1674 | xpc_request_partition_activation_uv, | |
1675 | .request_partition_reactivation = | |
1676 | xpc_request_partition_reactivation_uv, | |
1677 | .request_partition_deactivation = | |
1678 | xpc_request_partition_deactivation_uv, | |
1679 | .cancel_partition_deactivation_request = | |
1680 | xpc_cancel_partition_deactivation_request_uv, | |
1681 | ||
1682 | .setup_ch_structures = xpc_setup_ch_structures_uv, | |
1683 | .teardown_ch_structures = xpc_teardown_ch_structures_uv, | |
1684 | ||
1685 | .make_first_contact = xpc_make_first_contact_uv, | |
1686 | ||
1687 | .get_chctl_all_flags = xpc_get_chctl_all_flags_uv, | |
1688 | .send_chctl_closerequest = xpc_send_chctl_closerequest_uv, | |
1689 | .send_chctl_closereply = xpc_send_chctl_closereply_uv, | |
1690 | .send_chctl_openrequest = xpc_send_chctl_openrequest_uv, | |
1691 | .send_chctl_openreply = xpc_send_chctl_openreply_uv, | |
1692 | .send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv, | |
1693 | .process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv, | |
1694 | ||
1695 | .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv, | |
1696 | ||
1697 | .setup_msg_structures = xpc_setup_msg_structures_uv, | |
1698 | .teardown_msg_structures = xpc_teardown_msg_structures_uv, | |
1699 | ||
1700 | .indicate_partition_engaged = xpc_indicate_partition_engaged_uv, | |
1701 | .indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv, | |
1702 | .assume_partition_disengaged = xpc_assume_partition_disengaged_uv, | |
1703 | .partition_engaged = xpc_partition_engaged_uv, | |
1704 | .any_partition_engaged = xpc_any_partition_engaged_uv, | |
1705 | ||
1706 | .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv, | |
1707 | .send_payload = xpc_send_payload_uv, | |
1708 | .get_deliverable_payload = xpc_get_deliverable_payload_uv, | |
1709 | .received_payload = xpc_received_payload_uv, | |
1710 | .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv, | |
1711 | }; | |
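/*
 * [Illustrative sketch, not part of the original file.]  The dispatch
 * pattern this table enables, reduced to two operations: generic XPC
 * code calls through one global ops structure that the arch-specific
 * init routine fills in, so nothing outside this file needs to name a
 * *_uv (or *_sn2) symbol directly.  Names below are hypothetical.
 */
struct ops_model {
	int (*setup_partitions)(void);
	void (*teardown_partitions)(void);
};

static struct ops_model arch_ops_model;	/* generic code's entry point */

static int generic_init_model(const struct ops_model *arch)
{
	arch_ops_model = *arch;	/* cf. xpc_arch_ops = xpc_arch_ops_uv */
	return arch_ops_model.setup_partitions();
}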
1712 | ||
5b8669df | 1713 | int |
94bd2708 DN |
1714 | xpc_init_uv(void) |
1715 | { | |
a7665b0a | 1716 | xpc_arch_ops = xpc_arch_ops_uv; |
bd3e64c1 DN |
1717 | |
1718 | if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { | |
1719 | dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n", | |
1720 | XPC_MSG_HDR_MAX_SIZE); | |
1721 | return -E2BIG; | |
1722 | } | |
5b8669df | 1723 | |
2525789b DN |
1724 | xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, |
1725 | XPC_ACTIVATE_IRQ_NAME, | |
5b8669df | 1726 | xpc_handle_activate_IRQ_uv); |
2525789b DN |
1727 | if (IS_ERR(xpc_activate_mq_uv)) |
1728 | return PTR_ERR(xpc_activate_mq_uv); | |
5b8669df | 1729 | |
2525789b DN |
1730 | xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, |
1731 | XPC_NOTIFY_IRQ_NAME, | |
bd3e64c1 | 1732 | xpc_handle_notify_IRQ_uv); |
2525789b DN |
1733 | if (IS_ERR(xpc_notify_mq_uv)) { |
1734 | xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); | |
1735 | return PTR_ERR(xpc_notify_mq_uv); | |
bd3e64c1 DN |
1736 | } |
1737 | ||
5b8669df | 1738 | return 0; |
94bd2708 DN |
1739 | } |
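/*
 * [Illustrative sketch, not part of the original file.]  The unwind
 * shape used by xpc_init_uv() above, in miniature: when a later
 * allocation fails, the earlier one is torn down before returning,
 * and full teardown runs in reverse creation order.  create_b() is
 * rigged to fail here so the rollback path is the one exercised.
 */
#include <stddef.h>

struct res_model { int id; };

static struct res_model a_res = { 1 };

static struct res_model *create_a(void) { return &a_res; }
static struct res_model *create_b(void) { return NULL; }
static void destroy(struct res_model *r) { (void)r; }

static int init_model(void)
{
	struct res_model *a, *b;

	a = create_a();
	if (a == NULL)
		return -1;

	b = create_b();
	if (b == NULL) {
		destroy(a);		/* roll back the earlier step */
		return -1;
	}
	return 0;
}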
1740 | ||
1741 | void | |
1742 | xpc_exit_uv(void) | |
1743 | { | |
2525789b DN |
1744 | xpc_destroy_gru_mq_uv(xpc_notify_mq_uv); |
1745 | xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); | |
94bd2708 | 1746 | } |