/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *	Architecture specific implementation of common functions.
 *
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"
#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		__reserved_2	: 15,
		dest		: 32;
};
#endif

static atomic64_t xpc_heartbeat_uv;
static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"
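/*
 * XPC uses two GRU message queues: the activate_mq carries partition
 * activation, heartbeat, and channel-control (open/close) traffic, and
 * the notify_mq carries the actual channel message payloads and their
 * ACKs.  Each is backed by node-local pages and raises an IRQ when a
 * message arrives (see xpc_create_gru_mq_uv() below).
 */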
static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
static int
xpc_setup_partitions_sn_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}

	return 0;
}
static void
xpc_teardown_partitions_sn_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset);
	if (mq->irq < 0) {
		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
			-mq->irq);
		return mq->irq;
	}

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
	#error not a supported configuration
#endif

	return 0;
}
static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
	#error not a supported configuration
#endif
}
static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}
static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
	#error not a supported configuration
#endif
}
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nid, mmr_value->vector, mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}
static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}
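/*
 * Send a message to a GRU message queue.  The two transient GRU errors
 * are retried indefinitely: MQE_QUEUE_FULL sleeps ~10ms between
 * attempts and MQE_CONGESTION retries immediately.  Any other error
 * aborts the send and is reported as xpGruSendMqError.
 */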
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}
static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}
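/*
 * Dispatch one message received on the activate_mq.  Heartbeat and
 * channel-control messages are handled directly; activate/deactivate
 * requests are only recorded in part_uv->act_state_req here and are
 * acted upon later by the heartbeat-checker thread, which the caller
 * wakes whenever *wakeup_hb_checker has been incremented.
 */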
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: {
		struct xpc_activate_mq_msg_heartbeat_req_uv *msg;

		msg = container_of(msg_hdr,
				   struct xpc_activate_mq_msg_heartbeat_req_uv,
				   hdr);
		part_uv->heartbeat = msg->heartbeat;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: {
		struct xpc_activate_mq_msg_heartbeat_req_uv *msg;

		msg = container_of(msg_hdr,
				   struct xpc_activate_mq_msg_heartbeat_req_uv,
				   hdr);
		part_uv->heartbeat = msg->heartbeat;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: {
		struct xpc_activate_mq_msg_heartbeat_req_uv *msg;

		msg = container_of(msg_hdr,
				   struct xpc_activate_mq_msg_heartbeat_req_uv,
				   hdr);
		part_uv->heartbeat = msg->heartbeat;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? below?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}
static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}
static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}
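/*
 * Send a message to another partition's activate_mq.  The remote
 * queue's GRU descriptor is fetched and cached locally on first use;
 * if the send fails and the cached descriptor has meanwhile been
 * invalidated (XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV cleared), it is
 * re-fetched and the send retried.
 */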
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}
static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}
static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}
static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}
static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
	#error not a supported configuration
#endif

	return ret;
}
static int
xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
{
	rp->sn.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}
static void
xpc_send_heartbeat_uv(int msg_type)
{
	short partid;
	struct xpc_partition *part;
	struct xpc_activate_mq_msg_heartbeat_req_uv msg;

	/*
	 * !!! On uv we're broadcasting a heartbeat message every 5 seconds.
	 * !!! Whereas on sn2 we're bte_copy'ng the heartbeat info every 20
	 * !!! seconds. This is an increase in numalink traffic.
	 * ??? Is this good?
	 */

	msg.heartbeat = atomic64_inc_return(&xpc_heartbeat_uv);

	partid = find_first_bit(xpc_heartbeating_to_mask_uv,
				XP_MAX_NPARTITIONS_UV);

	while (partid < XP_MAX_NPARTITIONS_UV) {
		part = &xpc_partitions[partid];

		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					      msg_type);

		partid = find_next_bit(xpc_heartbeating_to_mask_uv,
				       XP_MAX_NPARTITIONS_UV, partid + 1);
	}
}
static void
xpc_increment_heartbeat_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV);
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV);
}

static void
xpc_heartbeat_init_uv(void)
{
	atomic64_set(&xpc_heartbeat_uv, 0);
	bitmap_zero(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
	xpc_heartbeating_to_mask = &xpc_heartbeating_to_mask_uv[0];
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
}
static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret = xpNoHeartbeat;

	if (part_uv->remote_act_state != XPC_P_AS_INACTIVE &&
	    part_uv->remote_act_state != XPC_P_AS_DEACTIVATING) {

		if (part_uv->heartbeat != part->last_heartbeat ||
		    (part_uv->flags & XPC_P_HEARTBEAT_OFFLINE_UV)) {

			part->last_heartbeat = part_uv->heartbeat;
			ret = xpSuccess;
		}
	}
	return ret;
}
static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					   XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}
static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}
static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					 XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}
static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
}
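/*
 * The following helpers implement a simple spinlock-protected,
 * singly-linked FIFO.  A channel uses one FIFO for its free send-side
 * message slots (msg_slot_free_list) and another for received but not
 * yet delivered messages (recv_msg_list).
 */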
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;
		head->n_entries--;
	}
	BUG_ON(head->n_entries < 0);
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}
/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_sn_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}
/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_sn_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
}
static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}
static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}
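/*
 * Allocate the array of send-side message slots for a channel.  If a
 * full-sized allocation fails, progressively smaller slot counts are
 * tried, and ch->local_nentries is shrunk to whatever finally fit.
 */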
static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}
static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}
/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	static enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}
/*
 * Free up msg_slots and clear other stuff that were setup for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}
static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}
static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}
static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}
static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}
static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}
static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}
static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}
static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}
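/*
 * Deliver the completion callout for a sent message.  The cmpxchg() on
 * msg_slot->func guarantees the callout is made exactly once even if
 * an incoming ACK and xpc_notify_senders_of_disconnect_uv() race to
 * clear it.
 */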
static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}
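/*
 * Handle an ACK for a previously sent message.  Slot numbers advance
 * by ch->local_nentries each time a slot is reused, so the slot a
 * given ACK refers to is recovered by taking the number modulo
 * ch->local_nentries; the BUG_ON() below then catches a stale or
 * duplicated ACK.
 */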
static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}
static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}
static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}
static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}
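/*
 * Send a payload on a channel: allocate a send msg_slot, arm the
 * optional notify callout (func/key), build the message in an on-stack
 * buffer, and hand it to the remote notify_mq via xpc_send_gru_msg().
 * On failure the partition is deactivated and the slot's callout is
 * carefully disarmed (see the comment below on the race with
 * xpc_notify_senders_of_disconnect_uv()).
 */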
static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in which
		 * case we need to pretend we succeeded to send the message
		 * since the user will get a callout for the disconnect error
		 * by xpc_notify_senders_of_disconnect_uv(), and to also get an
		 * error returned here will confuse them. Additionally, since
		 * in this case the channel is being disconnected we don't need
		 * to put the msg_slot back on the free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}
/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}
/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}
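/*
 * Called once a delivered payload has been consumed.  An ACK (a
 * message whose hdr.size is zero) is sent back to the originating
 * partition so it can recycle its send-side msg_slot, and the receive
 * slot's number is advanced by ch->remote_nentries to mark it ready
 * for reuse.
 */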
static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */

	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);

	msg->hdr.msg_slot_number += ch->remote_nentries;
}
int
xpc_init_uv(void)
{
	xpc_setup_partitions_sn = xpc_setup_partitions_sn_uv;
	xpc_teardown_partitions_sn = xpc_teardown_partitions_sn_uv;
	xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv;
	xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv;
	xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_uv;
	xpc_increment_heartbeat = xpc_increment_heartbeat_uv;
	xpc_offline_heartbeat = xpc_offline_heartbeat_uv;
	xpc_online_heartbeat = xpc_online_heartbeat_uv;
	xpc_heartbeat_init = xpc_heartbeat_init_uv;
	xpc_heartbeat_exit = xpc_heartbeat_exit_uv;
	xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_uv;

	xpc_request_partition_activation = xpc_request_partition_activation_uv;
	xpc_request_partition_reactivation =
	    xpc_request_partition_reactivation_uv;
	xpc_request_partition_deactivation =
	    xpc_request_partition_deactivation_uv;
	xpc_cancel_partition_deactivation_request =
	    xpc_cancel_partition_deactivation_request_uv;

	xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_uv;
	xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_uv;

	xpc_make_first_contact = xpc_make_first_contact_uv;

	xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_uv;
	xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_uv;
	xpc_send_chctl_closereply = xpc_send_chctl_closereply_uv;
	xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_uv;
	xpc_send_chctl_openreply = xpc_send_chctl_openreply_uv;

	xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv;

	xpc_setup_msg_structures = xpc_setup_msg_structures_uv;
	xpc_teardown_msg_structures = xpc_teardown_msg_structures_uv;

	xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_uv;
	xpc_indicate_partition_disengaged =
	    xpc_indicate_partition_disengaged_uv;
	xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_uv;
	xpc_partition_engaged = xpc_partition_engaged_uv;
	xpc_any_partition_engaged = xpc_any_partition_engaged_uv;

	xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv;
	xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv;
	xpc_send_payload = xpc_send_payload_uv;
	xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv;
	xpc_get_deliverable_payload = xpc_get_deliverable_payload_uv;
	xpc_received_payload = xpc_received_payload_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
						  XPC_ACTIVATE_IRQ_NAME,
						  xpc_handle_activate_IRQ_uv);
	if (IS_ERR(xpc_activate_mq_uv))
		return PTR_ERR(xpc_activate_mq_uv);

	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
						XPC_NOTIFY_IRQ_NAME,
						xpc_handle_notify_IRQ_uv);
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		return PTR_ERR(xpc_notify_mq_uv);
	}

	return 0;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}