/* ldc.c: Logical Domain Channel link-layer protocol driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/iommu-common.h>

#include <asm/hypervisor.h>
#include <asm/iommu.h>
#include <asm/mdesc.h>

#define DRV_MODULE_NAME		"ldc"
#define PFX			DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"July 22, 2008"

#define COOKIE_PGSZ_CODE	0xf000000000000000ULL
#define COOKIE_PGSZ_CODE_SHIFT	60ULL

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
/* Packet header layout for unreliable and reliable mode frames.
 * When in RAW mode, packets are simply straight 64-byte payloads
 * with no headers.
 */
#define LDC_VERS	0x01 /* Link Version		*/
#define LDC_RTS		0x02 /* Request To Send		*/
#define LDC_RTR		0x03 /* Ready To Receive	*/
#define LDC_RDX		0x04 /* Ready for Data eXchange	*/
#define LDC_CTRL_MSK	0x0f

#define LDC_FRAG_MASK	0xc0
#define LDC_START	0x40

		u8			u_data[LDC_PACKET_SIZE - 8];

			u8		r_data[LDC_PACKET_SIZE - 8 - 8];
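/* Payload capacity per 64-byte frame follows from the member sizes
 * above (a sketch of the arithmetic, not new protocol facts): RAW
 * frames carry the full LDC_PACKET_SIZE = 64 bytes, unreliable frames
 * carry 64 - 8 = 56 bytes after the common header, and reliable or
 * stream frames carry 64 - 8 - 8 = 48 bytes once the ACK fields are
 * added.  These match the mss values chosen in ldc_alloc().
 */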
/* Ordered from largest major to smallest. */
static struct ldc_version ver_arr[] = {
	{ .major = 1, .minor = 0 },
};
#define LDC_DEFAULT_MTU			(4 * LDC_PACKET_SIZE)
#define LDC_DEFAULT_NUM_ENTRIES		(PAGE_SIZE / LDC_PACKET_SIZE)
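/* Worked example (assuming the usual 8K sparc64 PAGE_SIZE and the
 * 64-byte LDC_PACKET_SIZE): the default MTU is 4 * 64 = 256 bytes,
 * and each queue defaults to 8192 / 64 = 128 packet entries.
 */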
	int (*write)(struct ldc_channel *, const void *, unsigned int);
	int (*read)(struct ldc_channel *, void *, unsigned int);

static const struct ldc_mode_ops raw_ops;
static const struct ldc_mode_ops nonraw_ops;
static const struct ldc_mode_ops stream_ops;

int ldom_domaining_enabled;
	/* Protects ldc_unmap. */
	spinlock_t lock;

	struct ldc_mtable_entry *page_table;
	struct iommu_map_table iommu_map_table;
	/* Protects all operations that depend upon channel state. */
	spinlock_t lock;

	struct ldc_packet *tx_base;
	unsigned long tx_head;
	unsigned long tx_tail;
	unsigned long tx_num_entries;

	unsigned long tx_acked;

	struct ldc_packet *rx_base;
	unsigned long rx_head;
	unsigned long rx_tail;
	unsigned long rx_num_entries;

	unsigned long chan_state;

	struct ldc_channel_config cfg;

	const struct ldc_mode_ops *mops;

	struct ldc_iommu iommu;

	struct ldc_version ver;
#define LDC_HS_CLOSED		0x00
#define LDC_HS_OPEN		0x01
#define LDC_HS_GOTVERS		0x02
#define LDC_HS_SENTRTR		0x03
#define LDC_HS_GOTRTR		0x04
#define LDC_HS_COMPLETE		0x10

#define LDC_FLAG_ALLOCED_QUEUES		0x01
#define LDC_FLAG_REGISTERED_QUEUES	0x02
#define LDC_FLAG_REGISTERED_IRQS	0x04
#define LDC_FLAG_RESET			0x10

#define LDC_IRQ_NAME_MAX	32
	char rx_irq_name[LDC_IRQ_NAME_MAX];
	char tx_irq_name[LDC_IRQ_NAME_MAX];

	struct hlist_head mh_list;

	struct hlist_node list;
#define ldcdbg(TYPE, f, a...) \
do {	if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
		printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
} while (0)

#define LDC_ABORT(lp)	ldc_abort((lp), __func__)
static const char *state_to_str(u8 state)
{
	switch (state) {
	case LDC_STATE_INVALID:
		return "INVALID";
	case LDC_STATE_BOUND:
		return "BOUND";
	case LDC_STATE_READY:
		return "READY";
	case LDC_STATE_CONNECTED:
		return "CONNECTED";
	default:
		return "<UNKNOWN>";
	}
}
static unsigned long __advance(unsigned long off, unsigned long num_entries)
{
	off += LDC_PACKET_SIZE;
	if (off == (num_entries * LDC_PACKET_SIZE))
		off = 0;
	return off;
}

static unsigned long rx_advance(struct ldc_channel *lp, unsigned long off)
{
	return __advance(off, lp->rx_num_entries);
}

static unsigned long tx_advance(struct ldc_channel *lp, unsigned long off)
{
	return __advance(off, lp->tx_num_entries);
}
static struct ldc_packet *handshake_get_tx_packet(struct ldc_channel *lp,
						  unsigned long *new_tail)
{
	struct ldc_packet *p;
	unsigned long t;

	t = tx_advance(lp, lp->tx_tail);
	if (t == lp->tx_head)
		return NULL;

	*new_tail = t;
	p = lp->tx_base;
	return p + (lp->tx_tail / LDC_PACKET_SIZE);
}
/* When we are in reliable or stream mode, we have to track the next
 * packet we haven't gotten an ACK for in the TX queue using tx_acked.
 * We have to be careful not to stomp over the queue past that point.
 * During the handshake, we don't have TX data packets pending in the
 * queue and that's why handshake_get_tx_packet() need not be mindful
 * of lp->tx_acked.
 */
static unsigned long head_for_data(struct ldc_channel *lp)
{
	if (lp->cfg.mode == LDC_MODE_STREAM)
		return lp->tx_acked;
	return lp->tx_head;
}
static int tx_has_space_for(struct ldc_channel *lp, unsigned int size)
{
	unsigned long limit, tail, new_tail, diff;
	unsigned int mss = lp->mss;

	limit = head_for_data(lp);
	tail = lp->tx_tail;
	new_tail = tx_advance(lp, tail);
	if (new_tail == limit)
		return 0;

	if (limit > new_tail)
		diff = limit - new_tail;
	else
		diff = (limit +
			((lp->tx_num_entries * LDC_PACKET_SIZE) - new_tail));
	diff /= LDC_PACKET_SIZE;

	if (diff * mss < size)
		return 0;
	return 1;
}
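/* Worked example of the space check above (hypothetical numbers, not
 * from this file): if limit = 0x1000 and new_tail = 0x0800, then
 * diff = 0x800 / 64 = 32 free slots, so a write of up to 32 * mss
 * bytes (32 * 56 = 1792 in UNRELIABLE mode) fits.  When new_tail
 * catches up to limit, the queue is treated as full.
 */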
static struct ldc_packet *data_get_tx_packet(struct ldc_channel *lp,
					     unsigned long *new_tail)
{
	struct ldc_packet *p;
	unsigned long h, t;

	h = head_for_data(lp);
	t = tx_advance(lp, lp->tx_tail);

	return p + (lp->tx_tail / LDC_PACKET_SIZE);
}
static int set_tx_tail(struct ldc_channel *lp, unsigned long tail)
{
	unsigned long orig_tail = lp->tx_tail;
	int limit = 1000;

	lp->tx_tail = tail;
	while (limit-- > 0) {
		unsigned long err;

		err = sun4v_ldc_tx_set_qtail(lp->id, tail);
		if (!err)
			return 0;
		if (err != HV_EWOULDBLOCK) {
			lp->tx_tail = orig_tail;
			return -EINVAL;
		}
	}

	lp->tx_tail = orig_tail;
	return -EBUSY;
}
/* This just updates the head value in the hypervisor using
 * a polling loop with a timeout. The caller takes care of
 * updating software state representing the head change, if any.
 */
static int __set_rx_head(struct ldc_channel *lp, unsigned long head)
{
	int limit = 1000;

	while (limit-- > 0) {
		unsigned long err;

		err = sun4v_ldc_rx_set_qhead(lp->id, head);
		if (!err)
			return 0;
		if (err != HV_EWOULDBLOCK)
			return -EINVAL;
	}

	return -EBUSY;
}
static int send_tx_packet(struct ldc_channel *lp,
			  struct ldc_packet *p,
			  unsigned long new_tail)
{
	BUG_ON(p != (lp->tx_base + (lp->tx_tail / LDC_PACKET_SIZE)));

	return set_tx_tail(lp, new_tail);
}
static struct ldc_packet *handshake_compose_ctrl(struct ldc_channel *lp,
						 u8 stype, u8 ctrl,
						 void *data, int dlen,
						 unsigned long *new_tail)
{
	struct ldc_packet *p = handshake_get_tx_packet(lp, new_tail);

	if (p) {
		memset(p, 0, sizeof(*p));
		if (data)
			memcpy(p->u.u_data, data, dlen);
	}
	return p;
}
static int start_handshake(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	struct ldc_version *ver;
	unsigned long new_tail;

	ver = &ver_arr[0];

	ldcdbg(HS, "SEND VER INFO maj[%u] min[%u]\n",
	       ver->major, ver->minor);

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
				   ver, sizeof(*ver), &new_tail);
	if (p) {
		int err = send_tx_packet(lp, p, new_tail);
		if (!err)
			lp->flags &= ~LDC_FLAG_RESET;
		return err;
	}

	return -EBUSY;
}
static int send_version_nack(struct ldc_channel *lp,
			     u16 major, u16 minor)
{
	struct ldc_packet *p;
	struct ldc_version ver;
	unsigned long new_tail;

	ver.major = major;
	ver.minor = minor;

	p = handshake_compose_ctrl(lp, LDC_NACK, LDC_VERS,
				   &ver, sizeof(ver), &new_tail);
	if (!p)
		return -EBUSY;

	ldcdbg(HS, "SEND VER NACK maj[%u] min[%u]\n",
	       ver.major, ver.minor);

	return send_tx_packet(lp, p, new_tail);
}
static int send_version_ack(struct ldc_channel *lp,
			    struct ldc_version *vp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_ACK, LDC_VERS,
				   vp, sizeof(*vp), &new_tail);
	if (!p)
		return -EBUSY;

	ldcdbg(HS, "SEND VER ACK maj[%u] min[%u]\n",
	       vp->major, vp->minor);

	return send_tx_packet(lp, p, new_tail);
}
static int send_rts(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTS, NULL, 0,
				   &new_tail);
	if (!p)
		return -EBUSY;

	p->env = lp->cfg.mode;

	ldcdbg(HS, "SEND RTS env[0x%x] seqid[0x%x]\n",
	       p->env, p->seqid);

	return send_tx_packet(lp, p, new_tail);
}
static int send_rtr(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTR, NULL, 0,
				   &new_tail);
	if (!p)
		return -EBUSY;

	p->env = lp->cfg.mode;

	ldcdbg(HS, "SEND RTR env[0x%x] seqid[0x%x]\n",
	       p->env, p->seqid);

	return send_tx_packet(lp, p, new_tail);
}
static int send_rdx(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RDX, NULL, 0,
				   &new_tail);
	if (!p)
		return -EBUSY;

	p->seqid = ++lp->snd_nxt;
	p->u.r.ackid = lp->rcv_nxt;

	ldcdbg(HS, "SEND RDX env[0x%x] seqid[0x%x] ackid[0x%x]\n",
	       p->env, p->seqid, p->u.r.ackid);

	return send_tx_packet(lp, p, new_tail);
}
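/* Putting the senders and handlers together, the handshake runs
 * roughly like this (a sketch assembled from the state transitions in
 * this file; A is the side whose ldc_connect() sent VER INFO first):
 *
 *	A: VER INFO  ------------>	(process_ver_info on B)
 *	             <------------	B: VER ACK
 *	A: RTS       ------------>	(B enters LDC_HS_SENTRTR)
 *	             <------------	B: RTR
 *	A: RDX       ------------>	(both sides LDC_HS_COMPLETE)
 *
 * A completes when it receives RTR; B completes when it receives RDX.
 */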
static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
{
	struct ldc_packet *p;
	unsigned long new_tail;
	int err;

	p = data_get_tx_packet(lp, &new_tail);
	if (!p)
		return -EBUSY;

	memset(p, 0, sizeof(*p));
	p->type = data_pkt->type;
	p->stype = LDC_NACK;
	p->ctrl = data_pkt->ctrl & LDC_CTRL_MSK;
	p->seqid = lp->snd_nxt + 1;
	p->u.r.ackid = lp->rcv_nxt;

	ldcdbg(HS, "SEND DATA NACK type[0x%x] ctl[0x%x] seq[0x%x] ack[0x%x]\n",
	       p->type, p->ctrl, p->seqid, p->u.r.ackid);

	err = send_tx_packet(lp, p, new_tail);
	if (!err)
		lp->snd_nxt++;

	return err;
}
static int ldc_abort(struct ldc_channel *lp, const char *msg)
{
	unsigned long hv_err;

	ldcdbg(STATE, "ABORT[%s]\n", msg);

	/* We report but do not act upon the hypervisor errors because
	 * there really isn't much we can do if they fail at this point.
	 */
	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_tx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
		       lp->id, lp->tx_ra, lp->tx_num_entries, hv_err);

	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head, &lp->tx_tail,
					&lp->chan_state);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_tx_get_state(%lx,...) failed, err=%lu\n",
		       lp->id, hv_err);

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_rx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
		       lp->id, lp->rx_ra, lp->rx_num_entries, hv_err);

	/* Refetch the RX queue state as well, because we could be invoked
	 * here in the queue processing context.
	 */
	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head, &lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_rx_get_state(%lx,...) failed, err=%lu\n",
		       lp->id, hv_err);

	lp->flags |= LDC_FLAG_RESET;

	return -ECONNRESET;
}
static struct ldc_version *find_by_major(u16 major)
{
	struct ldc_version *ret = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(ver_arr); i++) {
		struct ldc_version *v = &ver_arr[i];
		if (v->major <= major) {
			ret = v;
			break;
		}
	}
	return ret;
}
static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
{
	struct ldc_version *vap;
	int err;

	ldcdbg(HS, "GOT VERSION INFO major[%x] minor[%x]\n",
	       vp->major, vp->minor);

	if (lp->hs_state == LDC_HS_GOTVERS) {
		lp->hs_state = LDC_HS_OPEN;
		memset(&lp->ver, 0, sizeof(lp->ver));
	}

	vap = find_by_major(vp->major);
	if (!vap) {
		err = send_version_nack(lp, 0, 0);
	} else if (vap->major != vp->major) {
		err = send_version_nack(lp, vap->major, vap->minor);
	} else {
		struct ldc_version ver = *vp;
		if (ver.minor > vap->minor)
			ver.minor = vap->minor;
		err = send_version_ack(lp, &ver);
		if (!err) {
			lp->ver = ver;
			lp->hs_state = LDC_HS_GOTVERS;
		}
	}
	if (err)
		return LDC_ABORT(lp);

	return 0;
}
static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
{
	ldcdbg(HS, "GOT VERSION ACK major[%x] minor[%x]\n",
	       vp->major, vp->minor);

	if (lp->hs_state == LDC_HS_GOTVERS) {
		if (lp->ver.major != vp->major ||
		    lp->ver.minor != vp->minor)
			return LDC_ABORT(lp);
	} else {
		lp->ver = *vp;
		lp->hs_state = LDC_HS_GOTVERS;
	}
	if (send_rts(lp))
		return LDC_ABORT(lp);
	return 0;
}
static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
{
	struct ldc_version *vap;
	struct ldc_packet *p;
	unsigned long new_tail;

	if (vp->major == 0 && vp->minor == 0)
		return LDC_ABORT(lp);

	vap = find_by_major(vp->major);
	if (!vap)
		return LDC_ABORT(lp);

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
				   vap, sizeof(*vap), &new_tail);
	if (!p)
		return LDC_ABORT(lp);

	return send_tx_packet(lp, p, new_tail);
}
static int process_version(struct ldc_channel *lp,
			   struct ldc_packet *p)
{
	struct ldc_version *vp;

	vp = (struct ldc_version *) p->u.u_data;

	switch (p->stype) {
	case LDC_INFO:
		return process_ver_info(lp, vp);
	case LDC_ACK:
		return process_ver_ack(lp, vp);
	case LDC_NACK:
		return process_ver_nack(lp, vp);
	default:
		return LDC_ABORT(lp);
	}
}
static int process_rts(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RTS stype[%x] seqid[%x] env[%x]\n",
	       p->stype, p->seqid, p->env);

	if (p->stype != LDC_INFO ||
	    lp->hs_state != LDC_HS_GOTVERS ||
	    p->env != lp->cfg.mode)
		return LDC_ABORT(lp);

	lp->snd_nxt = p->seqid;
	lp->rcv_nxt = p->seqid;
	lp->hs_state = LDC_HS_SENTRTR;
	if (send_rtr(lp))
		return LDC_ABORT(lp);

	return 0;
}
static int process_rtr(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RTR stype[%x] seqid[%x] env[%x]\n",
	       p->stype, p->seqid, p->env);

	if (p->stype != LDC_INFO ||
	    p->env != lp->cfg.mode)
		return LDC_ABORT(lp);

	lp->snd_nxt = p->seqid;
	lp->hs_state = LDC_HS_COMPLETE;
	ldc_set_state(lp, LDC_STATE_CONNECTED);

	return send_rdx(lp);
}
static int rx_seq_ok(struct ldc_channel *lp, u32 seqid)
{
	return lp->rcv_nxt + 1 == seqid;
}
static int process_rdx(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RDX stype[%x] seqid[%x] env[%x] ackid[%x]\n",
	       p->stype, p->seqid, p->env, p->u.r.ackid);

	if (p->stype != LDC_INFO ||
	    !(rx_seq_ok(lp, p->seqid)))
		return LDC_ABORT(lp);

	lp->rcv_nxt = p->seqid;

	lp->hs_state = LDC_HS_COMPLETE;
	ldc_set_state(lp, LDC_STATE_CONNECTED);

	return 0;
}
static int process_control_frame(struct ldc_channel *lp,
				 struct ldc_packet *p)
{
	switch (p->ctrl) {
	case LDC_VERS:
		return process_version(lp, p);
	case LDC_RTS:
		return process_rts(lp, p);
	case LDC_RTR:
		return process_rtr(lp, p);
	case LDC_RDX:
		return process_rdx(lp, p);
	default:
		return LDC_ABORT(lp);
	}
}

static int process_error_frame(struct ldc_channel *lp,
			       struct ldc_packet *p)
{
	return LDC_ABORT(lp);
}
static int process_data_ack(struct ldc_channel *lp,
			    struct ldc_packet *ack)
{
	unsigned long head = lp->tx_acked;
	u32 ackid = ack->u.r.ackid;

	while (1) {
		struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE);

		head = tx_advance(lp, head);

		if (p->seqid == ackid) {
			lp->tx_acked = head;
			return 0;
		}
		if (head == lp->tx_tail)
			return LDC_ABORT(lp);
	}

	return 0;
}
static void send_events(struct ldc_channel *lp, unsigned int event_mask)
{
	if (event_mask & LDC_EVENT_RESET)
		lp->cfg.event(lp->event_arg, LDC_EVENT_RESET);
	if (event_mask & LDC_EVENT_UP)
		lp->cfg.event(lp->event_arg, LDC_EVENT_UP);
	if (event_mask & LDC_EVENT_DATA_READY)
		lp->cfg.event(lp->event_arg, LDC_EVENT_DATA_READY);
}
static irqreturn_t ldc_rx(int irq, void *dev_id)
{
	struct ldc_channel *lp = dev_id;
	unsigned long orig_state, flags;
	unsigned int event_mask;

	spin_lock_irqsave(&lp->lock, flags);

	orig_state = lp->chan_state;

	/* We should probably check for hypervisor errors here and
	 * reset the LDC channel if we get one.
	 */
	sun4v_ldc_rx_get_state(lp->id,
			       &lp->rx_head,
			       &lp->rx_tail,
			       &lp->chan_state);

	ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
	       orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);

	event_mask = 0;
	if (lp->cfg.mode == LDC_MODE_RAW &&
	    lp->chan_state == LDC_CHANNEL_UP) {
		lp->hs_state = LDC_HS_COMPLETE;
		ldc_set_state(lp, LDC_STATE_CONNECTED);

		/*
		 * Generate an LDC_EVENT_UP event if the channel
		 * was not already up.
		 */
		if (orig_state != LDC_CHANNEL_UP) {
			event_mask |= LDC_EVENT_UP;
			orig_state = lp->chan_state;
		}
	}
	/* If we are in reset state, flush the RX queue and ignore
	 * everything else.
	 */
	if (lp->flags & LDC_FLAG_RESET) {
		(void) ldc_rx_reset(lp);
		goto out;
	}
	/* Once we finish the handshake, we let the ldc_read()
	 * paths do all of the control frame and state management.
	 * Just trigger the callback.
	 */
	if (lp->hs_state == LDC_HS_COMPLETE) {
handshake_complete:
		if (lp->chan_state != orig_state) {
			unsigned int event = LDC_EVENT_RESET;

			if (lp->chan_state == LDC_CHANNEL_UP)
				event = LDC_EVENT_UP;

			event_mask |= event;
		}
		if (lp->rx_head != lp->rx_tail)
			event_mask |= LDC_EVENT_DATA_READY;

		goto out;
	}
	if (lp->chan_state != orig_state)
		goto out;

	while (lp->rx_head != lp->rx_tail) {
		struct ldc_packet *p;
		unsigned long new;
		int err;

		p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);

		switch (p->type) {
		case LDC_CTRL:
			err = process_control_frame(lp, p);
			break;
		case LDC_DATA:
			event_mask |= LDC_EVENT_DATA_READY;
			err = 0;
			break;
		case LDC_ERR:
			err = process_error_frame(lp, p);
			break;
		}

		new = lp->rx_head;
		new += LDC_PACKET_SIZE;
		if (new == (lp->rx_num_entries * LDC_PACKET_SIZE))
			new = 0;
		lp->rx_head = new;

		err = __set_rx_head(lp, new);
		if (err < 0) {
			(void) LDC_ABORT(lp);
			break;
		}

		if (lp->hs_state == LDC_HS_COMPLETE)
			goto handshake_complete;
	}
out:
	spin_unlock_irqrestore(&lp->lock, flags);

	send_events(lp, event_mask);

	return IRQ_HANDLED;
}
static irqreturn_t ldc_tx(int irq, void *dev_id)
{
	struct ldc_channel *lp = dev_id;
	unsigned long flags, orig_state;
	unsigned int event_mask = 0;

	spin_lock_irqsave(&lp->lock, flags);

	orig_state = lp->chan_state;

	/* We should probably check for hypervisor errors here and
	 * reset the LDC channel if we get one.
	 */
	sun4v_ldc_tx_get_state(lp->id,
			       &lp->tx_head,
			       &lp->tx_tail,
			       &lp->chan_state);

	ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
	       orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);

	if (lp->cfg.mode == LDC_MODE_RAW &&
	    lp->chan_state == LDC_CHANNEL_UP) {
		lp->hs_state = LDC_HS_COMPLETE;
		ldc_set_state(lp, LDC_STATE_CONNECTED);

		/*
		 * Generate an LDC_EVENT_UP event if the channel
		 * was not already up.
		 */
		if (orig_state != LDC_CHANNEL_UP) {
			event_mask |= LDC_EVENT_UP;
			orig_state = lp->chan_state;
		}
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	send_events(lp, event_mask);

	return IRQ_HANDLED;
}
/* XXX ldc_alloc() and ldc_free() need to run under a mutex so
 * XXX that addition and removal from the ldc_channel_list has
 * XXX atomicity, otherwise the __ldc_channel_exists() check is
 * XXX totally pointless as another thread can slip into ldc_alloc()
 * XXX and add a channel with the same ID.  There also needs to be
 * XXX a spinlock for ldc_channel_list.
 */
static HLIST_HEAD(ldc_channel_list);

static int __ldc_channel_exists(unsigned long id)
{
	struct ldc_channel *lp;

	hlist_for_each_entry(lp, &ldc_channel_list, list) {
		if (lp->id == id)
			return 1;
	}
	return 0;
}
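/* A sketch of what the XXX comment above asks for (hypothetical fix,
 * not code in this file): serialize the exists-check and insertion.
 *
 *	static DEFINE_MUTEX(ldc_channel_mutex);
 *
 *	mutex_lock(&ldc_channel_mutex);
 *	if (__ldc_channel_exists(id)) {
 *		mutex_unlock(&ldc_channel_mutex);
 *		return ERR_PTR(-EEXIST);
 *	}
 *	...allocate and add the new channel to ldc_channel_list...
 *	mutex_unlock(&ldc_channel_mutex);
 */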
static int alloc_queue(const char *name, unsigned long num_entries,
		       struct ldc_packet **base, unsigned long *ra)
{
	unsigned long size, order;
	void *q;

	size = num_entries * LDC_PACKET_SIZE;
	order = get_order(size);

	q = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!q) {
		printk(KERN_ERR PFX "Alloc of %s queue failed with "
		       "size=%lu order=%lu\n", name, size, order);
		return -ENOMEM;
	}

	memset(q, 0, PAGE_SIZE << order);

	*base = q;
	*ra = __pa(q);

	return 0;
}

static void free_queue(unsigned long num_entries, struct ldc_packet *q)
{
	unsigned long size, order;

	if (!q)
		return;

	size = num_entries * LDC_PACKET_SIZE;
	order = get_order(size);

	free_pages((unsigned long)q, order);
}
static unsigned long ldc_cookie_to_index(u64 cookie, void *arg)
{
	u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
	/* struct ldc_iommu *ldc_iommu = (struct ldc_iommu *)arg; */

	cookie &= ~COOKIE_PGSZ_CODE;

	return (cookie >> (13ULL + (szcode * 3ULL)));
}
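/* Worked example (assuming 8K pages, i.e. page-size code 0): a cookie
 * of 0x0000000000046100 has szcode = 0, so the map-table index is
 * cookie >> 13 = 0x23, and the low 13 bits (0x100) are the offset
 * within the mapped page.  make_cookie() below performs the inverse.
 */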
static void ldc_demap(struct ldc_iommu *iommu, unsigned long id, u64 cookie,
		      unsigned long entry, unsigned long npages)
{
	struct ldc_mtable_entry *base;
	unsigned long i, shift;

	shift = (cookie >> COOKIE_PGSZ_CODE_SHIFT) * 3;
	base = iommu->page_table + entry;
	for (i = 0; i < npages; i++) {
		if (base->cookie)
			sun4v_ldc_revoke(id, cookie + (i << shift),
					 base->cookie);
		base->mte = 0;
		base++;
	}
}

/* XXX Make this configurable... XXX */
#define LDC_IOTABLE_SIZE	(8 * 1024)
static int ldc_iommu_init(const char *name, struct ldc_channel *lp)
{
	unsigned long sz, num_tsb_entries, tsbsize, order;
	struct ldc_iommu *ldc_iommu = &lp->iommu;
	struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
	struct ldc_mtable_entry *table;
	unsigned long hv_err;
	int err;

	num_tsb_entries = LDC_IOTABLE_SIZE;
	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
	spin_lock_init(&ldc_iommu->lock);

	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->map) {
		printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
		return -ENOMEM;
	}
	iommu_tbl_pool_init(iommu, num_tsb_entries, PAGE_SHIFT,
			    NULL, false /* no large pool */,
			    1 /* npools */,
			    true /* skip span boundary check */);

	order = get_order(tsbsize);

	table = (struct ldc_mtable_entry *)
		__get_free_pages(GFP_KERNEL, order);
	err = -ENOMEM;
	if (!table) {
		printk(KERN_ERR PFX "Alloc of MTE table failed, "
		       "size=%lu order=%lu\n", tsbsize, order);
		goto out_free_map;
	}

	memset(table, 0, PAGE_SIZE << order);

	ldc_iommu->page_table = table;

	hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
					 num_tsb_entries);
	err = -EINVAL;
	if (hv_err)
		goto out_free_table;

	return 0;

out_free_table:
	free_pages((unsigned long) table, order);
	ldc_iommu->page_table = NULL;

out_free_map:
	kfree(iommu->map);
	iommu->map = NULL;

	return err;
}
static void ldc_iommu_release(struct ldc_channel *lp)
{
	struct ldc_iommu *ldc_iommu = &lp->iommu;
	struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
	unsigned long num_tsb_entries, tsbsize, order;

	(void) sun4v_ldc_set_map_table(lp->id, 0, 0);

	num_tsb_entries = iommu->poolsize * iommu->nr_pools;
	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
	order = get_order(tsbsize);

	free_pages((unsigned long) ldc_iommu->page_table, order);
	ldc_iommu->page_table = NULL;

	kfree(iommu->map);
	iommu->map = NULL;
}
struct ldc_channel *ldc_alloc(unsigned long id,
			      const struct ldc_channel_config *cfgp,
			      void *event_arg,
			      const char *name)
{
	struct ldc_channel *lp;
	const struct ldc_mode_ops *mops;
	unsigned long dummy1, dummy2, hv_err;
	u8 mss, *mssbuf;
	int err;

	err = -ENODEV;
	if (!ldom_domaining_enabled)
		goto out_err;

	err = -EINVAL;
	switch (cfgp->mode) {
	case LDC_MODE_RAW:
		mops = &raw_ops;
		mss = LDC_PACKET_SIZE;
		break;
	case LDC_MODE_UNRELIABLE:
		mops = &nonraw_ops;
		mss = LDC_PACKET_SIZE - 8;
		break;
	case LDC_MODE_STREAM:
		mops = &stream_ops;
		mss = LDC_PACKET_SIZE - 8 - 8;
		break;
	default:
		goto out_err;
	}

	if (!cfgp->event || !event_arg || !cfgp->rx_irq || !cfgp->tx_irq)
		goto out_err;

	hv_err = sun4v_ldc_tx_qinfo(id, &dummy1, &dummy2);
	err = -ENODEV;
	if (hv_err == HV_ECHANNEL)
		goto out_err;

	err = -EEXIST;
	if (__ldc_channel_exists(id))
		goto out_err;

	mssbuf = NULL;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	err = -ENOMEM;
	if (!lp)
		goto out_err;

	spin_lock_init(&lp->lock);

	lp->id = id;

	err = ldc_iommu_init(name, lp);
	if (err)
		goto out_free_ldc;

	lp->mops = mops;
	lp->mss = mss;

	lp->cfg = *cfgp;
	if (!lp->cfg.mtu)
		lp->cfg.mtu = LDC_DEFAULT_MTU;

	if (lp->cfg.mode == LDC_MODE_STREAM) {
		mssbuf = kzalloc(lp->cfg.mtu, GFP_KERNEL);
		if (!mssbuf) {
			err = -ENOMEM;
			goto out_free_iommu;
		}
		lp->mssbuf = mssbuf;
	}

	lp->event_arg = event_arg;

	/* XXX allow setting via ldc_channel_config to override defaults
	 * XXX or use some formula based upon mtu
	 */
	lp->tx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
	lp->rx_num_entries = LDC_DEFAULT_NUM_ENTRIES;

	err = alloc_queue("TX", lp->tx_num_entries,
			  &lp->tx_base, &lp->tx_ra);
	if (err)
		goto out_free_mssbuf;

	err = alloc_queue("RX", lp->rx_num_entries,
			  &lp->rx_base, &lp->rx_ra);
	if (err)
		goto out_free_txq;

	lp->flags |= LDC_FLAG_ALLOCED_QUEUES;

	lp->hs_state = LDC_HS_CLOSED;
	ldc_set_state(lp, LDC_STATE_INIT);

	INIT_HLIST_NODE(&lp->list);
	hlist_add_head(&lp->list, &ldc_channel_list);

	INIT_HLIST_HEAD(&lp->mh_list);

	snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
	snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);

	err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
			  lp->rx_irq_name, lp);
	if (err)
		goto out_free_rxq;

	err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
			  lp->tx_irq_name, lp);
	if (err) {
		free_irq(lp->cfg.rx_irq, lp);
		goto out_free_rxq;
	}

	return lp;

out_free_rxq:
	free_queue(lp->rx_num_entries, lp->rx_base);

out_free_txq:
	free_queue(lp->tx_num_entries, lp->tx_base);

out_free_mssbuf:
	kfree(mssbuf);

out_free_iommu:
	ldc_iommu_release(lp);

out_free_ldc:
	kfree(lp);

out_err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ldc_alloc);
void ldc_unbind(struct ldc_channel *lp)
{
	if (lp->flags & LDC_FLAG_REGISTERED_IRQS) {
		free_irq(lp->cfg.rx_irq, lp);
		free_irq(lp->cfg.tx_irq, lp);
		lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
	}

	if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) {
		sun4v_ldc_tx_qconf(lp->id, 0, 0);
		sun4v_ldc_rx_qconf(lp->id, 0, 0);
		lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
	}
	if (lp->flags & LDC_FLAG_ALLOCED_QUEUES) {
		free_queue(lp->tx_num_entries, lp->tx_base);
		free_queue(lp->rx_num_entries, lp->rx_base);
		lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES;
	}

	ldc_set_state(lp, LDC_STATE_INIT);
}
EXPORT_SYMBOL(ldc_unbind);
void ldc_free(struct ldc_channel *lp)
{
	ldc_unbind(lp);
	hlist_del(&lp->list);
	kfree(lp->mssbuf);
	ldc_iommu_release(lp);

	kfree(lp);
}
EXPORT_SYMBOL(ldc_free);
/* Bind the channel.  This registers the LDC queues with
 * the hypervisor and puts the channel into a pseudo-listening
 * state.  This does not initiate a handshake, ldc_connect() does
 * that.
 */
int ldc_bind(struct ldc_channel *lp)
{
	unsigned long hv_err, flags;
	int err = -EINVAL;

	if (lp->state != LDC_STATE_INIT)
		return -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	enable_irq(lp->cfg.rx_irq);
	enable_irq(lp->cfg.tx_irq);

	lp->flags |= LDC_FLAG_REGISTERED_IRQS;

	err = -ENODEV;
	hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_free_irqs;

	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		goto out_free_irqs;

	hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_unmap_tx;

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		goto out_unmap_tx;

	lp->flags |= LDC_FLAG_REGISTERED_QUEUES;

	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);
	err = -EBUSY;
	if (hv_err)
		goto out_unmap_rx;

	lp->tx_acked = lp->tx_head;

	lp->hs_state = LDC_HS_OPEN;
	ldc_set_state(lp, LDC_STATE_BOUND);

	if (lp->cfg.mode == LDC_MODE_RAW) {
		/*
		 * There is no handshake in RAW mode, so handshake
		 * is completed.
		 */
		lp->hs_state = LDC_HS_COMPLETE;
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;

out_unmap_rx:
	lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
	sun4v_ldc_rx_qconf(lp->id, 0, 0);

out_unmap_tx:
	sun4v_ldc_tx_qconf(lp->id, 0, 0);

out_free_irqs:
	lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
	free_irq(lp->cfg.tx_irq, lp);
	free_irq(lp->cfg.rx_irq, lp);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_bind);
int ldc_connect(struct ldc_channel *lp)
{
	unsigned long flags;
	int err;

	if (lp->cfg.mode == LDC_MODE_RAW)
		return -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
	    !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
	    lp->hs_state != LDC_HS_OPEN)
		err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
	else
		err = start_handshake(lp);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_connect);
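/* Typical client usage (a hedged sketch, not an API contract stated in
 * this file; the "vio" names are hypothetical):
 *
 *	lp = ldc_alloc(channel_id, &cfg, vio_priv, "vio");
 *	if (IS_ERR(lp))
 *		return PTR_ERR(lp);
 *	err = ldc_bind(lp);		// register queues, pseudo-listen
 *	if (!err && ldc_mode(lp) != LDC_MODE_RAW)
 *		err = ldc_connect(lp);	// kick off the handshake
 *
 * Completion is signalled asynchronously through cfg.event() with
 * LDC_EVENT_UP once the handshake finishes.
 */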
int ldc_disconnect(struct ldc_channel *lp)
{
	unsigned long hv_err, flags;
	int err = -ENODEV;

	if (lp->cfg.mode == LDC_MODE_RAW)
		return -EINVAL;

	if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
	    !(lp->flags & LDC_FLAG_REGISTERED_QUEUES))
		return -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		goto out_err;

	ldc_set_state(lp, LDC_STATE_BOUND);
	lp->hs_state = LDC_HS_OPEN;
	lp->flags |= LDC_FLAG_RESET;

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;

out_err:
	sun4v_ldc_tx_qconf(lp->id, 0, 0);
	sun4v_ldc_rx_qconf(lp->id, 0, 0);
	free_irq(lp->cfg.tx_irq, lp);
	free_irq(lp->cfg.rx_irq, lp);
	lp->flags &= ~(LDC_FLAG_REGISTERED_IRQS |
		       LDC_FLAG_REGISTERED_QUEUES);
	ldc_set_state(lp, LDC_STATE_INIT);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_disconnect);
int ldc_state(struct ldc_channel *lp)
{
	return lp->state;
}
EXPORT_SYMBOL(ldc_state);

void ldc_set_state(struct ldc_channel *lp, u8 state)
{
	ldcdbg(STATE, "STATE (%s) --> (%s)\n",
	       state_to_str(lp->state),
	       state_to_str(state));

	lp->state = state;
}
EXPORT_SYMBOL(ldc_set_state);

int ldc_mode(struct ldc_channel *lp)
{
	return lp->cfg.mode;
}
EXPORT_SYMBOL(ldc_mode);
int ldc_rx_reset(struct ldc_channel *lp)
{
	return __set_rx_head(lp, lp->rx_tail);
}

void __ldc_print(struct ldc_channel *lp, const char *caller)
{
	pr_info("%s: id=0x%lx flags=0x%x state=%s cstate=0x%lx hsstate=0x%x\n"
		"\trx_h=0x%lx rx_t=0x%lx rx_n=%ld\n"
		"\ttx_h=0x%lx tx_t=0x%lx tx_n=%ld\n"
		"\trcv_nxt=%u snd_nxt=%u\n",
		caller, lp->id, lp->flags, state_to_str(lp->state),
		lp->chan_state, lp->hs_state,
		lp->rx_head, lp->rx_tail, lp->rx_num_entries,
		lp->tx_head, lp->tx_tail, lp->tx_num_entries,
		lp->rcv_nxt, lp->snd_nxt);
}
static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
{
	struct ldc_packet *p;
	unsigned long new_tail, hv_err;
	int err;

	hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
					&lp->chan_state);
	if (unlikely(hv_err))
		return -EBUSY;

	if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
		return LDC_ABORT(lp);

	if (size > LDC_PACKET_SIZE)
		return -EMSGSIZE;

	p = data_get_tx_packet(lp, &new_tail);
	if (!p)
		return -EAGAIN;

	memcpy(p, buf, size);

	err = send_tx_packet(lp, p, new_tail);
	if (!err)
		err = size;

	return err;
}
static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
{
	struct ldc_packet *p;
	unsigned long hv_err, new;
	int err;

	if (size < LDC_PACKET_SIZE)
		return -EINVAL;

	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		return LDC_ABORT(lp);

	if (lp->chan_state == LDC_CHANNEL_DOWN ||
	    lp->chan_state == LDC_CHANNEL_RESETTING)
		return -ECONNRESET;

	if (lp->rx_head == lp->rx_tail)
		return 0;

	p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
	memcpy(buf, p, LDC_PACKET_SIZE);

	new = rx_advance(lp, lp->rx_head);
	lp->rx_head = new;

	err = __set_rx_head(lp, new);
	if (err < 0)
		err = -ECONNRESET;
	else
		err = LDC_PACKET_SIZE;

	return err;
}

static const struct ldc_mode_ops raw_ops = {
	.write		= write_raw,
	.read		= read_raw,
};
static int write_nonraw(struct ldc_channel *lp, const void *buf,
			unsigned int size)
{
	unsigned long hv_err, tail;
	unsigned int copied;
	int err;

	hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
					&lp->chan_state);
	if (unlikely(hv_err))
		return -EBUSY;

	if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
		return LDC_ABORT(lp);

	if (!tx_has_space_for(lp, size))
		return -EAGAIN;

	copied = 0;
	tail = lp->tx_tail;
	while (copied < size) {
		struct ldc_packet *p = lp->tx_base + (tail / LDC_PACKET_SIZE);
		u8 *data = ((lp->cfg.mode == LDC_MODE_UNRELIABLE) ?
			    p->u.u_data :
			    p->u.r.r_data);
		int data_len;

		p->type = LDC_DATA;
		p->stype = LDC_INFO;

		data_len = size - copied;
		if (data_len > lp->mss)
			data_len = lp->mss;

		BUG_ON(data_len > LDC_LEN);

		p->env = (data_len |
			  (copied == 0 ? LDC_START : 0) |
			  (data_len == size - copied ? LDC_STOP : 0));

		ldcdbg(DATA, "SENT DATA [%02x:%02x:%02x:%02x:%08x]\n",
		       p->type, p->stype, p->ctrl, p->env, p->seqid);

		memcpy(data, buf, data_len);
		buf += data_len;
		copied += data_len;

		tail = tx_advance(lp, tail);
	}

	err = set_tx_tail(lp, tail);
	if (!err)
		err = size;

	return err;
}
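/* Worked example of the env packing above (hypothetical sizes): a
 * 100-byte write in UNRELIABLE mode (mss = 56) becomes two frames,
 * the first with env = 56 | LDC_START and the second with
 * env = 44 | LDC_STOP.  A write that fits in a single frame carries
 * both LDC_START and LDC_STOP.
 */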
static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
		      struct ldc_packet *first_frag)
{
	int err;

	if (first_frag)
		lp->rcv_nxt = first_frag->seqid - 1;

	err = send_data_nack(lp, p);
	if (err)
		return err;

	err = ldc_rx_reset(lp);
	if (err < 0)
		return LDC_ABORT(lp);

	return 0;
}
static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
{
	if (p->stype & LDC_ACK) {
		int err = process_data_ack(lp, p);
		if (err)
			return err;
	}
	if (p->stype & LDC_NACK)
		return LDC_ABORT(lp);

	return 0;
}
static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
{
	unsigned long dummy;
	int limit = 1000;
	int err = -EAGAIN;

	ldcdbg(DATA, "DATA WAIT cur_head[%lx] rx_head[%lx] rx_tail[%lx]\n",
	       cur_head, lp->rx_head, lp->rx_tail);
	while (limit-- > 0) {
		unsigned long hv_err;

		hv_err = sun4v_ldc_rx_get_state(lp->id,
						&dummy,
						&lp->rx_tail,
						&lp->chan_state);
		if (hv_err)
			return LDC_ABORT(lp);

		if (lp->chan_state == LDC_CHANNEL_DOWN ||
		    lp->chan_state == LDC_CHANNEL_RESETTING)
			return -ECONNRESET;

		if (cur_head != lp->rx_tail) {
			ldcdbg(DATA, "DATA WAIT DONE "
			       "head[%lx] tail[%lx] chan_state[%lx]\n",
			       dummy, lp->rx_tail, lp->chan_state);
			return 0;
		}

		udelay(1);
	}
	return err;
}
static int rx_set_head(struct ldc_channel *lp, unsigned long head)
{
	int err = __set_rx_head(lp, head);

	if (err < 0)
		return LDC_ABORT(lp);

	lp->rx_head = head;
	return 0;
}

static void send_data_ack(struct ldc_channel *lp)
{
	unsigned long new_tail;
	struct ldc_packet *p;

	p = data_get_tx_packet(lp, &new_tail);
	if (p) {
		int err;

		memset(p, 0, sizeof(*p));
		p->stype = LDC_ACK;
		p->seqid = lp->snd_nxt + 1;
		p->u.r.ackid = lp->rcv_nxt;

		err = send_tx_packet(lp, p, new_tail);
		if (!err)
			lp->snd_nxt++;
	}
}
static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
{
	struct ldc_packet *first_frag;
	unsigned long hv_err, new;
	int err, copied;

	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		return LDC_ABORT(lp);

	if (lp->chan_state == LDC_CHANNEL_DOWN ||
	    lp->chan_state == LDC_CHANNEL_RESETTING)
		return -ECONNRESET;

	if (lp->rx_head == lp->rx_tail)
		return 0;

	first_frag = NULL;
	copied = err = 0;
	new = lp->rx_head;
	while (1) {
		struct ldc_packet *p;
		int pkt_len;

		BUG_ON(new == lp->rx_tail);
		p = lp->rx_base + (new / LDC_PACKET_SIZE);

		ldcdbg(RX, "RX read pkt[%02x:%02x:%02x:%02x:%08x:%08x] "
		       "rcv_nxt[%08x]\n",
		       p->type, p->stype, p->ctrl, p->env,
		       p->seqid, p->u.r.ackid, lp->rcv_nxt);
		if (unlikely(!rx_seq_ok(lp, p->seqid))) {
			err = rx_bad_seq(lp, p, first_frag);
			copied = 0;
			break;
		}

		if (p->type & LDC_CTRL) {
			err = process_control_frame(lp, p);
			if (err < 0)
				break;
			err = 0;
		}

		lp->rcv_nxt = p->seqid;

		/*
		 * If this is a control-only packet, there is nothing
		 * else to do but advance the rx queue since the packet
		 * was already processed above.
		 */
		if (!(p->type & LDC_DATA)) {
			new = rx_advance(lp, new);
			goto no_data;
		}
		if (p->stype & (LDC_ACK | LDC_NACK)) {
			err = data_ack_nack(lp, p);
			if (err)
				break;
		}
		if (!(p->stype & LDC_INFO)) {
			new = rx_advance(lp, new);
			err = rx_set_head(lp, new);
			if (err)
				break;
			goto no_data;
		}

		pkt_len = p->env & LDC_LEN;
		/* Every initial packet starts with the START bit set.
		 *
		 * Singleton packets will have both START+STOP set.
		 *
		 * Fragments will have START set in the first frame, STOP
		 * set in the last frame, and neither bit set in middle
		 * frames of the packet.
		 *
		 * Therefore if we are at the beginning of a packet and
		 * we don't see START, or we are in the middle of a fragmented
		 * packet and do see START, we are unsynchronized and should
		 * flush the RX queue.
		 */
		if ((first_frag == NULL && !(p->env & LDC_START)) ||
		    (first_frag != NULL &&  (p->env & LDC_START))) {
			new = rx_advance(lp, new);

			err = rx_set_head(lp, new);
			if (err)
				break;
		}

		if (!first_frag)
			first_frag = p;
		if (pkt_len > size - copied) {
			/* User didn't give us a big enough buffer,
			 * what to do?  This is a pretty serious error.
			 *
			 * Since we haven't updated the RX ring head to
			 * consume any of the packets, signal the error
			 * to the user and just leave the RX ring alone.
			 *
			 * This seems the best behavior because this allows
			 * a user of the LDC layer to start with a small
			 * RX buffer for ldc_read() calls and use -EMSGSIZE
			 * as a cue to enlarge its read buffer.
			 */
			err = -EMSGSIZE;
			break;
		}
		/* Ok, we are gonna eat this one. */
		new = rx_advance(lp, new);

		memcpy((buf + copied),
		       (lp->cfg.mode == LDC_MODE_UNRELIABLE ?
			p->u.u_data : p->u.r.r_data), pkt_len);
		copied += pkt_len;

		if (p->env & LDC_STOP)
			break;

no_data:
		if (new == lp->rx_tail) {
			err = rx_data_wait(lp, new);
			if (err)
				break;
		}
	}

	if (!err)
		err = rx_set_head(lp, new);
	if (err && first_frag)
		lp->rcv_nxt = first_frag->seqid - 1;

	if (!err)
		err = copied;

	if (err > 0 && lp->cfg.mode != LDC_MODE_UNRELIABLE)
		send_data_ack(lp);

	return err;
}
static const struct ldc_mode_ops nonraw_ops = {
	.write		= write_nonraw,
	.read		= read_nonraw,
};
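/* Caller-side sketch of the -EMSGSIZE contract documented inside
 * read_nonraw() (hypothetical buffer management, not from this file):
 *
 *	n = ldc_read(lp, buf, buf_len);
 *	if (n == -EMSGSIZE) {
 *		// nothing was consumed; retry with a bigger buffer
 *		buf_len *= 2;
 *		buf = krealloc(buf, buf_len, GFP_KERNEL);
 *	}
 */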
static int write_stream(struct ldc_channel *lp, const void *buf,
			unsigned int size)
{
	if (size > lp->cfg.mtu)
		size = lp->cfg.mtu;
	return write_nonraw(lp, buf, size);
}

static int read_stream(struct ldc_channel *lp, void *buf, unsigned int size)
{
	if (!lp->mssbuf_len) {
		int err = read_nonraw(lp, lp->mssbuf, lp->cfg.mtu);
		if (err < 0)
			return err;

		lp->mssbuf_len = err;
		lp->mssbuf_off = 0;
	}

	if (size > lp->mssbuf_len)
		size = lp->mssbuf_len;
	memcpy(buf, lp->mssbuf + lp->mssbuf_off, size);

	lp->mssbuf_off += size;
	lp->mssbuf_len -= size;

	return size;
}

static const struct ldc_mode_ops stream_ops = {
	.write		= write_stream,
	.read		= read_stream,
};
int ldc_write(struct ldc_channel *lp, const void *buf, unsigned int size)
{
	unsigned long flags;
	int err;

	if (!buf)
		return -EINVAL;

	if (!size)
		return 0;

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->hs_state != LDC_HS_COMPLETE)
		err = -ENOTCONN;
	else
		err = lp->mops->write(lp, buf, size);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_write);

int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
{
	unsigned long flags;
	int err;

	ldcdbg(RX, "%s: entered size=%d\n", __func__, size);

	if (!buf)
		return -EINVAL;

	if (!size)
		return 0;

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->hs_state != LDC_HS_COMPLETE)
		err = -ENOTCONN;
	else
		err = lp->mops->read(lp, buf, size);

	spin_unlock_irqrestore(&lp->lock, flags);

	ldcdbg(RX, "%s: mode=%d, head=%lu, tail=%lu rv=%d\n", __func__,
	       lp->cfg.mode, lp->rx_head, lp->rx_tail, err);

	return err;
}
EXPORT_SYMBOL(ldc_read);
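/* Hedged usage sketch (all names hypothetical): a client typically
 * reads from its cfg.event callback when LDC_EVENT_DATA_READY fires:
 *
 *	static void my_event(void *arg, int event)
 *	{
 *		struct my_dev *dev = arg;
 *		char pkt[256];
 *		int n;
 *
 *		if (event != LDC_EVENT_DATA_READY)
 *			return;
 *		while ((n = ldc_read(dev->lp, pkt, sizeof(pkt))) > 0)
 *			my_process(dev, pkt, n);
 *	}
 */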
static u64 pagesize_code(void)
{
	switch (PAGE_SIZE) {
	default:
	case (8ULL * 1024ULL):
		return 0;
	case (64ULL * 1024ULL):
		return 1;
	case (512ULL * 1024ULL):
		return 2;
	case (4ULL * 1024ULL * 1024ULL):
		return 3;
	case (32ULL * 1024ULL * 1024ULL):
		return 4;
	case (256ULL * 1024ULL * 1024ULL):
		return 5;
	}
}

static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
{
	return ((pgsz_code << COOKIE_PGSZ_CODE_SHIFT) |
		(index << PAGE_SHIFT) |
		page_offset);
}
static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
					     unsigned long npages)
{
	long entry;

	entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table,
				      npages, NULL, (unsigned long)-1, 0);
	if (unlikely(entry == IOMMU_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}
static u64 perm_to_mte(unsigned int map_perm)
{
	u64 mte_base;

	mte_base = pagesize_code();

	if (map_perm & LDC_MAP_SHADOW) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_COPY_R;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_COPY_W;
	}
	if (map_perm & LDC_MAP_DIRECT) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_READ;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_WRITE;
		if (map_perm & LDC_MAP_X)
			mte_base |= LDC_MTE_EXEC;
	}
	if (map_perm & LDC_MAP_IO) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_IOMMU_R;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_IOMMU_W;
	}

	return mte_base;
}
static int pages_in_region(unsigned long base, long len)
{
	int count = 0;

	do {
		unsigned long new = (base + PAGE_SIZE) & PAGE_MASK;

		len -= (new - base);
		base = new;
		count++;
	} while (len > 0);

	return count;
}
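/* Worked example (8K pages assumed): a buffer starting 0x100 bytes
 * before a page boundary with len = 0x300 spans two pages, so two
 * map-table entries are needed even though len < PAGE_SIZE.
 */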
struct cookie_state {
	struct ldc_mtable_entry *page_table;
	struct ldc_trans_cookie *cookies;
	u64 mte_base;
	u64 prev_cookie;
	u32 pte_idx;
	u32 nc;
};

static void fill_cookies(struct cookie_state *sp, unsigned long pa,
			 unsigned long off, unsigned long len)
{
	do {
		unsigned long tlen, new = pa + PAGE_SIZE;
		u64 this_cookie;

		sp->page_table[sp->pte_idx].mte = sp->mte_base | pa;

		tlen = PAGE_SIZE - off;
		if (tlen > len)
			tlen = len;

		this_cookie = make_cookie(sp->pte_idx,
					  pagesize_code(), off);

		off = 0;

		if (this_cookie == sp->prev_cookie) {
			sp->cookies[sp->nc - 1].cookie_size += tlen;
		} else {
			sp->cookies[sp->nc].cookie_addr = this_cookie;
			sp->cookies[sp->nc].cookie_size = tlen;
			sp->nc++;
		}
		sp->prev_cookie = this_cookie + tlen;

		sp->pte_idx++;

		len -= tlen;
		pa = new;
	} while (len > 0);
}
static int sg_count_one(struct scatterlist *sg)
{
	unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
	long len = sg->length;

	if ((sg->offset | len) & (8UL - 1))
		return -EFAULT;

	return pages_in_region(base + sg->offset, len);
}

static int sg_count_pages(struct scatterlist *sg, int num_sg)
{
	int count;
	int i;

	count = 0;
	for (i = 0; i < num_sg; i++) {
		int err = sg_count_one(sg + i);
		if (err < 0)
			return err;
		count += err;
	}

	return count;
}
int ldc_map_sg(struct ldc_channel *lp,
	       struct scatterlist *sg, int num_sg,
	       struct ldc_trans_cookie *cookies, int ncookies,
	       unsigned int map_perm)
{
	unsigned long i, npages;
	struct ldc_mtable_entry *base;
	struct cookie_state state;
	struct ldc_iommu *iommu;
	int err;
	struct scatterlist *s;

	if (map_perm & ~LDC_MAP_ALL)
		return -EINVAL;

	err = sg_count_pages(sg, num_sg);
	if (err < 0)
		return err;

	npages = err;
	if (err > ncookies)
		return -EMSGSIZE;

	iommu = &lp->iommu;

	base = alloc_npages(iommu, npages);

	if (!base)
		return -ENOMEM;

	state.page_table = iommu->page_table;
	state.cookies = cookies;
	state.mte_base = perm_to_mte(map_perm);
	state.prev_cookie = ~(u64)0;
	state.pte_idx = (base - iommu->page_table);
	state.nc = 0;

	for_each_sg(sg, s, num_sg, i) {
		fill_cookies(&state, page_to_pfn(sg_page(s)) << PAGE_SHIFT,
			     s->offset, s->length);
	}

	return state.nc;
}
EXPORT_SYMBOL(ldc_map_sg);
int ldc_map_single(struct ldc_channel *lp,
		   void *buf, unsigned int len,
		   struct ldc_trans_cookie *cookies, int ncookies,
		   unsigned int map_perm)
{
	unsigned long npages, pa;
	struct ldc_mtable_entry *base;
	struct cookie_state state;
	struct ldc_iommu *iommu;

	if ((map_perm & ~LDC_MAP_ALL) || (ncookies < 1))
		return -EINVAL;

	pa = __pa(buf);
	if ((pa | len) & (8UL - 1))
		return -EFAULT;

	npages = pages_in_region(pa, len);

	iommu = &lp->iommu;

	base = alloc_npages(iommu, npages);

	if (!base)
		return -ENOMEM;

	state.page_table = iommu->page_table;
	state.cookies = cookies;
	state.mte_base = perm_to_mte(map_perm);
	state.prev_cookie = ~(u64)0;
	state.pte_idx = (base - iommu->page_table);
	state.nc = 0;
	fill_cookies(&state, (pa & PAGE_MASK), (pa & ~PAGE_MASK), len);
	BUG_ON(state.nc > ncookies);

	return state.nc;
}
EXPORT_SYMBOL(ldc_map_single);
static void free_npages(unsigned long id, struct ldc_iommu *iommu,
			u64 cookie, u64 size)
{
	unsigned long npages, entry;

	npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;

	entry = ldc_cookie_to_index(cookie, iommu);
	ldc_demap(iommu, id, cookie, entry, npages);
	iommu_tbl_range_free(&iommu->iommu_map_table, cookie, npages, entry);
}
void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
	       int ncookies)
{
	struct ldc_iommu *iommu = &lp->iommu;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	for (i = 0; i < ncookies; i++) {
		u64 addr = cookies[i].cookie_addr;
		u64 size = cookies[i].cookie_size;

		free_npages(lp->id, iommu, addr, size);
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
EXPORT_SYMBOL(ldc_unmap);
int ldc_copy(struct ldc_channel *lp, int copy_dir,
	     void *buf, unsigned int len, unsigned long offset,
	     struct ldc_trans_cookie *cookies, int ncookies)
{
	unsigned int orig_len;
	unsigned long ra;
	int i;

	if (copy_dir != LDC_COPY_IN && copy_dir != LDC_COPY_OUT) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Bad copy_dir[%d]\n",
		       lp->id, copy_dir);
		return -EINVAL;
	}

	ra = __pa(buf);
	if ((ra | len | offset) & (8UL - 1)) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Unaligned buffer "
		       "ra[%lx] len[%x] offset[%lx]\n",
		       lp->id, ra, len, offset);
		return -EFAULT;
	}

	if (lp->hs_state != LDC_HS_COMPLETE ||
	    (lp->flags & LDC_FLAG_RESET)) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Link down hs_state[%x] "
		       "flags[%x]\n", lp->id, lp->hs_state, lp->flags);
		return -ECONNRESET;
	}

	orig_len = len;
	for (i = 0; i < ncookies; i++) {
		unsigned long cookie_raddr = cookies[i].cookie_addr;
		unsigned long this_len = cookies[i].cookie_size;
		unsigned long actual_len;

		if (unlikely(offset)) {
			unsigned long this_off = offset;

			if (this_off > this_len)
				this_off = this_len;

			offset -= this_off;
			this_len -= this_off;
			if (!this_len)
				continue;
			cookie_raddr += this_off;
		}

		if (this_len > len)
			this_len = len;

		while (this_len) {
			unsigned long hv_err;

			hv_err = sun4v_ldc_copy(lp->id, copy_dir,
						cookie_raddr, ra,
						this_len, &actual_len);
			if (unlikely(hv_err)) {
				printk(KERN_ERR PFX "ldc_copy: ID[%lu] "
				       "hv_err[%lu]\n", lp->id, hv_err);
				if (lp->hs_state != LDC_HS_COMPLETE ||
				    (lp->flags & LDC_FLAG_RESET))
					return -ECONNRESET;
				else
					return -EFAULT;
			}

			cookie_raddr += actual_len;
			ra += actual_len;
			len -= actual_len;
			if (actual_len == this_len)
				break;

			this_len -= actual_len;
		}

		if (!len)
			break;
	}

	/* It is caller policy what to do about short copies.
	 * For example, a networking driver can declare the
	 * packet a runt and drop it.
	 */

	return orig_len - len;
}
EXPORT_SYMBOL(ldc_copy);
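/* Hedged usage sketch: after pulling a descriptor's worth of data in,
 * a caller compares the return value against what it asked for
 * (hypothetical driver code, not from this file):
 *
 *	copied = ldc_copy(lp, LDC_COPY_IN, buf, dlen, 0,
 *			  desc->cookies, desc->ncookies);
 *	if (copied < 0)
 *		return copied;		// link error
 *	if (copied != dlen)
 *		drop_as_runt();		// caller policy on short copies
 */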
void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
			  struct ldc_trans_cookie *cookies, int *ncookies,
			  unsigned int map_perm)
{
	void *buf;
	int err;

	if (len & (8UL - 1))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(len, GFP_ATOMIC);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	err = ldc_map_single(lp, buf, len, cookies, *ncookies, map_perm);
	if (err < 0) {
		kfree(buf);
		return ERR_PTR(err);
	}
	*ncookies = err;

	return buf;
}
EXPORT_SYMBOL(ldc_alloc_exp_dring);

void ldc_free_exp_dring(struct ldc_channel *lp, void *buf, unsigned int len,
			struct ldc_trans_cookie *cookies, int ncookies)
{
	ldc_unmap(lp, cookies, ncookies);
	kfree(buf);
}
EXPORT_SYMBOL(ldc_free_exp_dring);
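/* Hedged pairing sketch (cookie array size is the caller's choice):
 *
 *	struct ldc_trans_cookie cookies[2];
 *	int ncookies = 2;
 *	void *dring = ldc_alloc_exp_dring(lp, 512, cookies, &ncookies,
 *					  LDC_MAP_SHADOW | LDC_MAP_R |
 *					  LDC_MAP_W);
 *	if (IS_ERR(dring))
 *		return PTR_ERR(dring);
 *	...export cookies[0..ncookies-1] to the peer...
 *	ldc_free_exp_dring(lp, dring, 512, cookies, ncookies);
 */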
static int __init ldc_init(void)
{
	unsigned long major, minor;
	struct mdesc_handle *hp;
	const u64 *v;
	int err = -ENODEV;
	u64 mp;

	hp = mdesc_grab();
	if (!hp)
		return -ENODEV;

	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	if (mp == MDESC_NODE_NULL)
		goto out;

	v = mdesc_get_property(hp, mp, "domaining-enabled", NULL);
	if (!v)
		goto out;

	major = 1;
	minor = 0;
	if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor)) {
		printk(KERN_INFO PFX "Could not register LDOM hvapi.\n");
		goto out;
	}

	printk(KERN_INFO "%s", version);

	if (!*v) {
		printk(KERN_INFO PFX "Domaining disabled.\n");
		goto out;
	}
	ldom_domaining_enabled = 1;
	err = 0;

out:
	mdesc_release(hp);
	return err;
}

core_initcall(ldc_init);