/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *	Architecture specific implementation of common functions.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		dest		: 32;
};
#endif

static struct xpc_heartbeat_uv *xpc_heartbeat_uv;

#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"
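
/*
 * Sizing note (illustrative, not from the original source): each queue
 * reserves room for four messages per possible partition.  Assuming
 * XP_MAX_NPARTITIONS_UV is 256 and a GRU cache line is 64 bytes, the
 * activate queue works out to
 *
 *	4 * 256 * (1 * 64) = 64 KiB
 *
 * and the notify queue, with its two-cache-line messages, to 128 KiB.
 */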

static int xpc_mq_node = NUMA_NO_NODE;

static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;

static int
xpc_setup_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}

static void
xpc_teardown_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}

static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			       UV_AFFINITY_CPU);
	if (mq->irq < 0)
		return mq->irq;

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
	#error not a supported configuration
#endif

	return 0;
}

static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;	/* set the 'mask' bit to disable the irq */

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
	#error not a supported configuration
#endif
}

static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#elif defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
	#error not a supported configuration
#endif
}

static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int nasid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = __alloc_pages_node(nid,
				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				  pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable irq generation when a GRU mq operation occurs on this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nasid, mmr_value->vector, mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}

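/*
 * Usage sketch (illustrative): callers pair xpc_create_gru_mq_uv() with
 * xpc_destroy_gru_mq_uv(), treating the return value as an ERR_PTR, e.g.
 *
 *	struct xpc_gru_mq_uv *mq;
 *
 *	mq = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, cpu,
 *				  XPC_ACTIVATE_IRQ_NAME,
 *				  xpc_handle_activate_IRQ_uv);
 *	if (IS_ERR(mq))
 *		return PTR_ERR(mq);
 *	...
 *	xpc_destroy_gru_mq_uv(mq);
 */
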
static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions from accessing this GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable irq generation when a GRU mq op occurs on this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}

static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}

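/*
 * Sketch (illustrative only): if the retry questions above were answered
 * with a bound, the MQE_QUEUE_FULL arm might look roughly like
 *
 *	if (++retries > XPC_SEND_GRU_MSG_MAX_RETRIES) {	// hypothetical limit
 *		xp_ret = xpGruSendMqError;
 *		break;
 *	}
 *	(void)msleep_interruptible(10);
 */
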
static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}

static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int part_setup,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? below?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock(&part_uv->flags_lock);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock(&part_uv->flags_lock);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}

static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      part_referenced,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

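/*
 * Pattern note (illustrative): both IRQ handlers in this file drain their
 * GRU queue the same way; with handle() standing in for the per-queue
 * dispatch, the loop is essentially
 *
 *	while ((hdr = gru_get_next_message(mq->gru_mq_desc)) != NULL) {
 *		handle(hdr);
 *		gru_free_message(mq->gru_mq_desc, hdr);	// frees the mq slot
 *	}
 */
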
static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}

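/*
 * Usage sketch (illustrative): a remote descriptor is cached once via one
 * xp_remote_memcpy() and then reused for sends:
 *
 *	ret = xpc_cache_remote_gru_mq_desc_uv(desc, remote_desc_gpa);
 *	if (ret == xpSuccess)
 *		ret = xpc_send_gru_msg(desc, msg, msg_size);
 *
 * Clearing desc->mq above presumably keeps the copied descriptor from being
 * mistaken for one of our own local queues.
 */
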
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
	#error not a supported configuration
#endif

	return ret;
}

static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}

static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}

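/*
 * Worked example (illustrative): the local heartbeat is just a counter and
 * an offline flag, so the helpers above compose like this:
 *
 *	xpc_heartbeat_init_uv();	// value = 1, offline = 0
 *	xpc_increment_heartbeat_uv();	// value = 2
 *	xpc_offline_heartbeat_uv();	// value = 3, offline = 1
 *	xpc_online_heartbeat_uv();	// value = 4, offline = 0
 */
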
static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {

		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}

	return ret;
}

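/*
 * Decision summary (restating the code above, for illustration): after the
 * remote heartbeat is copied in,
 *
 *	value unchanged && !offline	-> xpNoHeartbeat (peer looks dead)
 *	value unchanged && offline	-> xpSuccess (peer deliberately quiet)
 *	value changed			-> xpSuccess, last_heartbeat updated
 */
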
static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					   XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					 XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
}

static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}

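/*
 * Usage sketch (illustrative): the fifo is an intrusive, spinlock-guarded
 * singly linked list; entries are embedded in the objects they queue:
 *
 *	struct xpc_fifo_entry_uv *entry;
 *	struct xpc_send_msg_slot_uv *slot;
 *
 *	xpc_put_fifo_entry_uv(&head, &slot->next);	// enqueue at tail
 *	entry = xpc_get_fifo_entry_uv(&head);		// dequeue from head
 *	if (entry != NULL)
 *		slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
 */
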
/*
 * Set up the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Tear down the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
		 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a quarter of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}

static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	static enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}

/*
 * Free up msg_slots and clear other stuff that were set up for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	lockdep_assert_held(&ch->lock);

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}

static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}

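/*
 * Handshake sketch (as implied by the helpers above and the activate mq
 * handler, for illustration): a channel opens via three chctl messages,
 *
 *	local side				remote side
 *	----------				-----------
 *	xpc_send_chctl_openrequest_uv()  ->	sets XPC_CHCTL_OPENREQUEST
 *	sets XPC_CHCTL_OPENREPLY	 <-	xpc_send_chctl_openreply_uv()
 *	xpc_send_chctl_opencomplete_uv() ->	sets XPC_CHCTL_OPENCOMPLETE
 *
 * with the close side mirroring it via closerequest/closereply.
 */
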
static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}

static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}

static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}

static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}

static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}

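/*
 * Worked example (illustrative): with ch->local_nentries == 4, send slot 2
 * carries msg_slot_number 2, then 6, then 10, ... on successive uses.  An
 * ACK echoes the number it saw, so
 *
 *	entry = 6 % 4;			// == 2, back to the same slot
 *	msg_slot->msg_slot_number += 4;	// the next send will carry 10
 *
 * and the BUG_ON() above catches an ACK for a message that was never
 * outstanding.
 */
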
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}

static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}

static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}

static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in which
		 * case we need to pretend we succeeded to send the message
		 * since the user will get a callout for the disconnect error
		 * by xpc_notify_senders_of_disconnect_uv(), and also getting an
		 * error returned here would confuse them. Additionally, since
		 * in this case the channel is being disconnected we don't need
		 * to put the msg_slot back on the free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}

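/*
 * Flow sketch (illustrative, happy path only): a notify-mq payload and its
 * completion callout:
 *
 *	sender					receiver
 *	------					--------
 *	xpc_allocate_msg_slot_uv()
 *	xpc_send_gru_msg(msg)		->	xpc_handle_notify_mq_msg_uv()
 *						xpc_get_deliverable_payload_uv()
 *						xpc_received_payload_uv()
 *	xpc_handle_notify_mq_ack_uv()	<-	(ACK: hdr.size == 0)
 *	msg_slot->func(xpMsgDelivered, ...)
 */
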
/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}

/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}

static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */

	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
}

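/*
 * Consumer sketch (illustrative; deliver_to_consumer() is hypothetical):
 * whoever drains the channel must hand each payload back so the zero-size
 * ACK above gets sent:
 *
 *	void *payload = xpc_get_deliverable_payload_uv(ch);
 *
 *	if (payload != NULL) {
 *		deliver_to_consumer(payload);
 *		xpc_received_payload_uv(ch, payload);
 *	}
 */
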
static struct xpc_arch_operations xpc_arch_ops_uv = {
	.setup_partitions = xpc_setup_partitions_uv,
	.teardown_partitions = xpc_teardown_partitions_uv,
	.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
	.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
	.setup_rsvd_page = xpc_setup_rsvd_page_uv,

	.allow_hb = xpc_allow_hb_uv,
	.disallow_hb = xpc_disallow_hb_uv,
	.disallow_all_hbs = xpc_disallow_all_hbs_uv,
	.increment_heartbeat = xpc_increment_heartbeat_uv,
	.offline_heartbeat = xpc_offline_heartbeat_uv,
	.online_heartbeat = xpc_online_heartbeat_uv,
	.heartbeat_init = xpc_heartbeat_init_uv,
	.heartbeat_exit = xpc_heartbeat_exit_uv,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

	.request_partition_activation =
		xpc_request_partition_activation_uv,
	.request_partition_reactivation =
		xpc_request_partition_reactivation_uv,
	.request_partition_deactivation =
		xpc_request_partition_deactivation_uv,
	.cancel_partition_deactivation_request =
		xpc_cancel_partition_deactivation_request_uv,

	.setup_ch_structures = xpc_setup_ch_structures_uv,
	.teardown_ch_structures = xpc_teardown_ch_structures_uv,

	.make_first_contact = xpc_make_first_contact_uv,

	.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
	.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
	.send_chctl_closereply = xpc_send_chctl_closereply_uv,
	.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
	.send_chctl_openreply = xpc_send_chctl_openreply_uv,
	.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
	.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

	.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

	.setup_msg_structures = xpc_setup_msg_structures_uv,
	.teardown_msg_structures = xpc_teardown_msg_structures_uv,

	.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
	.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
	.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
	.partition_engaged = xpc_partition_engaged_uv,
	.any_partition_engaged = xpc_any_partition_engaged_uv,

	.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
	.send_payload = xpc_send_payload_uv,
	.get_deliverable_payload = xpc_get_deliverable_payload_uv,
	.received_payload = xpc_received_payload_uv,
	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};

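/*
 * Dispatch note (illustrative): the common XPC code calls through the
 * global xpc_arch_ops table, so once xpc_init_uv() installs this struct a
 * call such as
 *
 *	ret = xpc_arch_ops.send_payload(ch, flags, payload, payload_size,
 *					notify_type, func, key);
 *
 * lands in xpc_send_payload_uv() on UV hardware.
 */
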
static int
xpc_init_mq_node(int nid)
{
	int cpu;

	cpus_read_lock();

	for_each_cpu(cpu, cpumask_of_node(nid)) {
		xpc_activate_mq_uv =
			xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
					     XPC_ACTIVATE_IRQ_NAME,
					     xpc_handle_activate_IRQ_uv);
		if (!IS_ERR(xpc_activate_mq_uv))
			break;
	}
	if (IS_ERR(xpc_activate_mq_uv)) {
		cpus_read_unlock();
		return PTR_ERR(xpc_activate_mq_uv);
	}

	for_each_cpu(cpu, cpumask_of_node(nid)) {
		xpc_notify_mq_uv =
			xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
					     XPC_NOTIFY_IRQ_NAME,
					     xpc_handle_notify_IRQ_uv);
		if (!IS_ERR(xpc_notify_mq_uv))
			break;
	}
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		cpus_read_unlock();
		return PTR_ERR(xpc_notify_mq_uv);
	}

	cpus_read_unlock();
	return 0;
}

int
xpc_init_uv(void)
{
	int nid;
	int ret = 0;

	xpc_arch_ops = xpc_arch_ops_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	if (xpc_mq_node < 0)
		for_each_online_node(nid) {
			ret = xpc_init_mq_node(nid);

			if (!ret)
				break;
		}
	else
		ret = xpc_init_mq_node(xpc_mq_node);

	if (ret < 0)
		dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
			-ret);

	return ret;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}

module_param(xpc_mq_node, int, 0);
MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");
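
/*
 * Usage note (illustrative): xpc_mq_node defaults to NUMA_NO_NODE, in which
 * case xpc_init_uv() probes every online node until the message queues
 * allocate; a specific node can be requested at load time, e.g.
 *
 *	modprobe xpc xpc_mq_node=0
 */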