// SPDX-License-Identifier: GPL-2.0
/* sunvnet.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 2016-2017 Oracle. All rights reserved.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#define CREATE_TRACE_POINTS
#include <trace/events/sunvnet.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#endif

#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#include "sunvnet_common.h"

/* Heuristic for the number of times to exponentially back off and
 * retry sending an LDC trigger when EAGAIN is encountered
 */
#define VNET_MAX_RETRIES	10
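
/*
 * Illustrative note (not part of the driver): the retry loops below pair
 * this limit with a capped exponential backoff, roughly
 *
 *	delay = 1;
 *	do {
 *		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
 *		if (err > 0)
 *			break;
 *		udelay(delay);
 *		if ((delay <<= 1) > 128)
 *			delay = 128;
 *	} while (err == -EAGAIN && retries++ <= VNET_MAX_RETRIES);
 *
 * i.e. waits of 1, 2, 4, ... microseconds, capped at 128us, for at most
 * VNET_MAX_RETRIES extra attempts (see vnet_send_ack() and
 * __vnet_tx_trigger()).
 */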

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual network support library");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.1");

static int __vnet_tx_trigger(struct vnet_port *port, u32 start);

static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VNET_TX_RING_SIZE);
}

static int vnet_handle_unknown(struct vnet_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	pr_err("Resetting connection\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

static int vnet_port_alloc_tx_ring(struct vnet_port *port);

int sunvnet_send_attr_common(struct vio_driver_state *vio)
{
	struct vnet_port *port = to_vnet_port(vio);
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	struct vio_net_attr_info pkt;
	int framelen = ETH_FRAME_LEN;
	int i, err;

	err = vnet_port_alloc_tx_ring(to_vnet_port(vio));
	if (err)
		return err;

	memset(&pkt, 0, sizeof(pkt));
	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);
	if (vio_version_before(vio, 1, 2))
		pkt.xfer_mode = VIO_DRING_MODE;
	else
		pkt.xfer_mode = VIO_NEW_DRING_MODE;
	pkt.addr_type = VNET_ADDR_ETHERMAC;
	pkt.ack_freq = 0;
	for (i = 0; i < 6; i++)
		pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
	if (vio_version_after(vio, 1, 3)) {
		if (port->rmtu) {
			port->rmtu = min(VNET_MAXPACKET, port->rmtu);
			pkt.mtu = port->rmtu;
		} else {
			port->rmtu = VNET_MAXPACKET;
			pkt.mtu = port->rmtu;
		}
		if (vio_version_after_eq(vio, 1, 6))
			pkt.options = VIO_TX_DRING;
	} else if (vio_version_before(vio, 1, 3)) {
		pkt.mtu = framelen;
	} else { /* v1.3 */
		pkt.mtu = framelen + VLAN_HLEN;
	}

	pkt.cflags = 0;
	if (vio_version_after_eq(vio, 1, 7) && port->tso) {
		pkt.cflags |= VNET_LSO_IPV4_CAPAB;
		if (!port->tsolen)
			port->tsolen = VNET_MAXTSO;
		pkt.ipv4_lso_maxlen = port->tsolen;
	}

	pkt.plnk_updt = PHYSLINK_UPDATE_NONE;

	viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
	       "cflags[0x%04x] lso_max[%u]\n",
	       pkt.xfer_mode, pkt.addr_type,
	       (unsigned long long)pkt.addr,
	       pkt.ack_freq, pkt.plnk_updt, pkt.options,
	       (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen);

	return vio_ldc_send(vio, &pkt, sizeof(pkt));
}
EXPORT_SYMBOL_GPL(sunvnet_send_attr_common);

static int handle_attr_info(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	struct vnet_port *port = to_vnet_port(vio);
	u64	localmtu;
	u8	xfer_mode;

	viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
	       " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long)pkt->addr,
	       pkt->ack_freq, pkt->plnk_updt, pkt->options,
	       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
	       pkt->ipv4_lso_maxlen);

	pkt->tag.sid = vio_send_sid(vio);

	xfer_mode = pkt->xfer_mode;
	/* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */
	if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE)
		xfer_mode = VIO_NEW_DRING_MODE;

	/* MTU negotiation:
	 *	< v1.3 - ETH_FRAME_LEN exactly
	 *	> v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change
	 *			pkt->mtu for ACK
	 *	= v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly
	 */
	if (vio_version_before(vio, 1, 3)) {
		localmtu = ETH_FRAME_LEN;
	} else if (vio_version_after(vio, 1, 3)) {
		localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET;
		localmtu = min(pkt->mtu, localmtu);
		pkt->mtu = localmtu;
	} else { /* v1.3 */
		localmtu = ETH_FRAME_LEN + VLAN_HLEN;
	}
	port->rmtu = localmtu;

	/* LSO negotiation */
	if (vio_version_after_eq(vio, 1, 7))
		port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
	else
		port->tso = false;
	if (port->tso) {
		if (!port->tsolen)
			port->tsolen = VNET_MAXTSO;
		port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
		if (port->tsolen < VNET_MINTSO) {
			port->tso = false;
			port->tsolen = 0;
			pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
		}
		pkt->ipv4_lso_maxlen = port->tsolen;
	} else {
		pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
		pkt->ipv4_lso_maxlen = 0;
		port->tsolen = 0;
	}

	/* for version >= 1.6, ACK the packet mode we support */
	if (vio_version_after_eq(vio, 1, 6)) {
		pkt->xfer_mode = VIO_NEW_DRING_MODE;
		pkt->options = VIO_TX_DRING;
	}

	if (!(xfer_mode & VIO_NEW_DRING_MODE) ||
	    pkt->addr_type != VNET_ADDR_ETHERMAC ||
	    pkt->mtu != localmtu) {
		viodbg(HS, "SEND NET ATTR NACK\n");

		pkt->tag.stype = VIO_SUBTYPE_NACK;

		(void)vio_ldc_send(vio, pkt, sizeof(*pkt));

		return -ECONNRESET;
	}

	viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] "
	       "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] "
	       "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long)pkt->addr,
	       pkt->ack_freq, pkt->plnk_updt, pkt->options,
	       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
	       pkt->ipv4_lso_maxlen);

	pkt->tag.stype = VIO_SUBTYPE_ACK;

	return vio_ldc_send(vio, pkt, sizeof(*pkt));
}

static int handle_attr_ack(struct vio_driver_state *vio,
			   struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR ACK\n");

	return 0;
}

static int handle_attr_nack(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR NACK\n");

	return -ECONNRESET;
}

int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg)
{
	struct vio_net_attr_info *pkt = arg;

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return handle_attr_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return handle_attr_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return handle_attr_nack(vio, pkt);

	default:
		return -ECONNRESET;
	}
}
EXPORT_SYMBOL_GPL(sunvnet_handle_attr_common);

void sunvnet_handshake_complete_common(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	dr->rcv_nxt = 1;
	dr->snd_nxt = 1;

	dr = &vio->drings[VIO_DRIVER_TX_RING];
	dr->rcv_nxt = 1;
	dr->snd_nxt = 1;
}
EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common);

/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly.  These 6 bytes are not in the descriptor
 * length, they are simply implied.  This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
					   unsigned int len)
{
	struct sk_buff *skb;
	unsigned long addr, off;

	skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8);
	if (unlikely(!skb))
		return NULL;

	addr = (unsigned long)skb->data;
	off = ((addr + 7UL) & ~7UL) - addr;
	if (off)
		skb_reserve(skb, off);

	return skb;
}
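
/*
 * Worked example (illustrative only): if netdev_alloc_skb() happens to
 * return skb->data ending in ...5, then off = ((...5 + 7) & ~7) - ...5 = 3
 * and skb_reserve(skb, 3) moves skb->data to the next 8-byte boundary.
 * The "+ 8 + 8" in the allocation leaves room both for this alignment
 * shift and for rounding the copy length up to a multiple of 8.
 */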

static inline void vnet_fullcsum_ipv4(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	int offset = skb_transport_offset(skb);

	if (skb->protocol != htons(ETH_P_IP))
		return;
	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_level = 1;
	skb->csum = 0;
	if (iph->protocol == IPPROTO_TCP) {
		struct tcphdr *ptcp = tcp_hdr(skb);

		ptcp->check = 0;
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						skb->len - offset, IPPROTO_TCP,
						skb->csum);
	} else if (iph->protocol == IPPROTO_UDP) {
		struct udphdr *pudp = udp_hdr(skb);

		pudp->check = 0;
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						skb->len - offset, IPPROTO_UDP,
						skb->csum);
	}
}
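
/*
 * Note (illustrative): csum_tcpudp_magic() folds the IPv4 pseudo-header
 * (source address, destination address, protocol and length) into the
 * payload sum produced by skb_checksum(), yielding the final 16-bit
 * one's-complement value written back into the TCP/UDP header; the IPv6
 * variant below does the same via csum_ipv6_magic().
 */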

#if IS_ENABLED(CONFIG_IPV6)
static inline void vnet_fullcsum_ipv6(struct sk_buff *skb)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	int offset = skb_transport_offset(skb);

	if (skb->protocol != htons(ETH_P_IPV6))
		return;
	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_level = 1;
	skb->csum = 0;
	if (ip6h->nexthdr == IPPROTO_TCP) {
		struct tcphdr *ptcp = tcp_hdr(skb);

		ptcp->check = 0;
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		ptcp->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      skb->len - offset, IPPROTO_TCP,
					      skb->csum);
	} else if (ip6h->nexthdr == IPPROTO_UDP) {
		struct udphdr *pudp = udp_hdr(skb);

		pudp->check = 0;
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		pudp->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      skb->len - offset, IPPROTO_UDP,
					      skb->csum);
	}
}
#endif

static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
{
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	unsigned int len = desc->size;
	unsigned int copy_len;
	struct sk_buff *skb;
	int maxlen;
	int err;

	err = -EMSGSIZE;
	if (port->tso && port->tsolen > port->rmtu)
		maxlen = port->tsolen;
	else
		maxlen = port->rmtu;
	if (unlikely(len < ETH_ZLEN || len > maxlen)) {
		dev->stats.rx_length_errors++;
		goto out_dropped;
	}

	skb = alloc_and_align_skb(dev, len);
	err = -ENOMEM;
	if (unlikely(!skb)) {
		dev->stats.rx_missed_errors++;
		goto out_dropped;
	}

	copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
	skb_put(skb, copy_len);
	err = ldc_copy(port->vio.lp, LDC_COPY_IN,
		       skb->data, copy_len, 0,
		       desc->cookies, desc->ncookies);
	if (unlikely(err < 0)) {
		dev->stats.rx_frame_errors++;
		goto out_free_skb;
	}

	skb_pull(skb, VNET_PACKET_SKIP);
	skb_trim(skb, len);
	skb->protocol = eth_type_trans(skb, dev);

	if (vio_version_after_eq(&port->vio, 1, 8)) {
		struct vio_net_dext *dext = vio_net_ext(desc);

		skb_reset_network_header(skb);

		if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
			if (skb->protocol == htons(ETH_P_IP)) {
				struct iphdr *iph = ip_hdr(skb);

				iph->check = 0;
				ip_send_check(iph);
			}
		}
		if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
		    skb->ip_summed == CHECKSUM_NONE) {
			if (skb->protocol == htons(ETH_P_IP)) {
				struct iphdr *iph = ip_hdr(skb);
				int ihl = iph->ihl * 4;

				skb_set_transport_header(skb, ihl);
				vnet_fullcsum_ipv4(skb);
#if IS_ENABLED(CONFIG_IPV6)
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				skb_set_transport_header(skb,
							 sizeof(struct ipv6hdr));
				vnet_fullcsum_ipv6(skb);
#endif
			}
		}
		if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_level = 0;
			if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK)
				skb->csum_level = 1;
		}
	}

	if (unlikely(is_multicast_ether_addr(eth_hdr(skb)->h_dest)))
		dev->stats.multicast++;
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	port->stats.rx_packets++;
	port->stats.rx_bytes += len;
	napi_gro_receive(&port->napi, skb);
	return 0;

out_free_skb:
	kfree_skb(skb);

out_dropped:
	dev->stats.rx_dropped++;
	return err;
}

static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
			 u32 start, u32 end, u8 vio_dring_state)
{
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_ACK,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= start,
		.end_idx		= end,
		.state			= vio_dring_state,
	};
	int err, delay;
	int retries = 0;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES) {
			pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n",
				port->raddr[0], port->raddr[1],
				port->raddr[2], port->raddr[3],
				port->raddr[4], port->raddr[5]);
			break;
		}
	} while (err == -EAGAIN);

	if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
		port->stop_rx_idx = end;
		port->stop_rx = true;
	} else {
		port->stop_rx_idx = 0;
		port->stop_rx = false;
	}

	return err;
}

static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
					struct vio_dring_state *dr,
					u32 index)
{
	struct vio_net_desc *desc = port->vio.desc_buf;
	int err;

	err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return ERR_PTR(err);

	return desc;
}

static int put_rx_desc(struct vnet_port *port,
		       struct vio_dring_state *dr,
		       struct vio_net_desc *desc,
		       u32 index)
{
	int err;

	err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return err;

	return 0;
}

static int vnet_walk_rx_one(struct vnet_port *port,
			    struct vio_dring_state *dr,
			    u32 index, int *needs_ack)
{
	struct vio_net_desc *desc = get_rx_desc(port, dr, index);
	struct vio_driver_state *vio = &port->vio;
	int err;

	BUG_ON(!desc);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;

	dma_rmb();

	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
	       desc->hdr.state, desc->hdr.ack,
	       desc->size, desc->ncookies,
	       desc->cookies[0].cookie_addr,
	       desc->cookies[0].cookie_size);

	err = vnet_rx_one(port, desc);
	if (err == -ECONNRESET)
		return err;
	trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid,
			  index, desc->hdr.ack);
	desc->hdr.state = VIO_DESC_DONE;
	err = put_rx_desc(port, dr, desc, index);
	if (err < 0)
		return err;
	*needs_ack = desc->hdr.ack;
	return 0;
}
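
/*
 * RX descriptor lifecycle (sketch of the logic above): the peer marks a
 * ring entry VIO_DESC_READY; vnet_walk_rx_one() copies the packet in via
 * vnet_rx_one(), flips the entry to VIO_DESC_DONE and writes it back with
 * put_rx_desc(); vnet_walk_rx() below then tells the peer how far we got
 * with vnet_send_ack().
 */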

static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
			u32 start, u32 end, int *npkts, int budget)
{
	struct vio_driver_state *vio = &port->vio;
	int ack_start = -1, ack_end = -1;
	bool send_ack = true;

	end = (end == (u32)-1) ? vio_dring_prev(dr, start)
			       : vio_dring_next(dr, end);

	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);

	while (start != end) {
		int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);

		if (err == -ECONNRESET)
			return err;
		if (err != 0)
			break;
		(*npkts)++;
		if (ack_start == -1)
			ack_start = start;
		ack_end = start;
		start = vio_dring_next(dr, start);
		if (ack && start != end) {
			err = vnet_send_ack(port, dr, ack_start, ack_end,
					    VIO_DRING_ACTIVE);
			if (err == -ECONNRESET)
				return err;
			ack_start = -1;
		}
		if ((*npkts) >= budget) {
			send_ack = false;
			break;
		}
	}
	if (unlikely(ack_start == -1)) {
		ack_end = vio_dring_prev(dr, start);
		ack_start = ack_end;
	}
	if (send_ack) {
		port->napi_resume = false;
		trace_vnet_tx_send_stopped_ack(port->vio._local_sid,
					       port->vio._peer_sid,
					       ack_end, *npkts);
		return vnet_send_ack(port, dr, ack_start, ack_end,
				     VIO_DRING_STOPPED);
	} else {
		trace_vnet_tx_defer_stopped_ack(port->vio._local_sid,
						port->vio._peer_sid,
						ack_end, *npkts);
		port->napi_resume = true;
		port->napi_stop_idx = ack_end;
		return 1;
	}
}

static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
		   int budget)
{
	struct vio_dring_data *pkt = msgbuf;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
	struct vio_driver_state *vio = &port->vio;

	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;
	if (unlikely(pkt->seq != dr->rcv_nxt)) {
		pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
		       pkt->seq, dr->rcv_nxt);
		return 0;
	}

	if (!port->napi_resume)
		dr->rcv_nxt++;

	/* XXX Validate pkt->start_idx and pkt->end_idx XXX */

	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
			    npkts, budget);
}

static int idx_is_pending(struct vio_dring_state *dr, u32 end)
{
	u32 idx = dr->cons;
	int found = 0;

	while (idx != dr->prod) {
		if (idx == end) {
			found = 1;
			break;
		}
		idx = vio_dring_next(dr, idx);
	}
	return found;
}

static int vnet_ack(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;
	struct net_device *dev;
	u32 end;
	struct vio_net_desc *desc;
	struct netdev_queue *txq;

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;

	end = pkt->end_idx;
	dev = VNET_PORT_TO_NET_DEVICE(port);
	netif_tx_lock(dev);
	if (unlikely(!idx_is_pending(dr, end))) {
		netif_tx_unlock(dev);
		return 0;
	}

	/* sync for race conditions with vnet_start_xmit() and tell xmit it
	 * is time to send a trigger.
	 */
	trace_vnet_rx_stopped_ack(port->vio._local_sid,
				  port->vio._peer_sid, end);
	dr->cons = vio_dring_next(dr, end);
	desc = vio_dring_entry(dr, dr->cons);
	if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
		/* vnet_start_xmit() just populated this dring but missed
		 * sending the "start" LDC message to the consumer.
		 * Send a "start" trigger on its behalf.
		 */
		if (__vnet_tx_trigger(port, dr->cons) > 0)
			port->start_cons = false;
		else
			port->start_cons = true;
	} else {
		port->start_cons = true;
	}
	netif_tx_unlock(dev);

	txq = netdev_get_tx_queue(dev, port->q_index);
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
		return 1;

	return 0;
}

static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}

static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
	struct vio_net_mcast_info *pkt = msgbuf;
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);

	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
		pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
		       dev->name,
		       pkt->tag.type,
		       pkt->tag.stype,
		       pkt->tag.stype_env,
		       pkt->tag.sid);

	return 0;
}

/* If the queue is stopped, wake it up so that we'll
 * send out another START message at the next TX.
 */
static void maybe_tx_wakeup(struct vnet_port *port)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
				  port->q_index);
	__netif_tx_lock(txq, smp_processor_id());
	if (likely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);
	__netif_tx_unlock(txq);
}

bool sunvnet_port_is_up_common(struct vnet_port *vnet)
{
	struct vio_driver_state *vio = &vnet->vio;

	return !!(vio->hs_state & VIO_HS_COMPLETE);
}
EXPORT_SYMBOL_GPL(sunvnet_port_is_up_common);

static int vnet_event_napi(struct vnet_port *port, int budget)
{
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	struct vio_driver_state *vio = &port->vio;
	int tx_wakeup, err;
	int npkts = 0;

	/* we don't expect any other bits */
	BUG_ON(port->rx_event & ~(LDC_EVENT_DATA_READY |
				  LDC_EVENT_RESET |
				  LDC_EVENT_UP));

	/* RESET takes precedence over any other event */
	if (port->rx_event & LDC_EVENT_RESET) {
		/* a link went down */

		if (port->vsw == 1) {
			netif_tx_stop_all_queues(dev);
			netif_carrier_off(dev);
		}

		vio_link_state_change(vio, LDC_EVENT_RESET);
		vnet_port_reset(port);
		vio_port_up(vio);

		/* If the device is running but its tx queue was
		 * stopped (due to flow control), restart it.
		 * This is necessary since vnet_port_reset()
		 * clears the tx drings and thus we may never get
		 * back a VIO_TYPE_DATA ACK packet - which is
		 * the normal mechanism to restart the tx queue.
		 */
		if (netif_running(dev))
			maybe_tx_wakeup(port);

		port->rx_event = 0;
		port->stats.event_reset++;
		return 0;
	}

	if (port->rx_event & LDC_EVENT_UP) {
		/* a link came up */

		if (port->vsw == 1) {
			netif_carrier_on(port->dev);
			netif_tx_start_all_queues(port->dev);
		}

		vio_link_state_change(vio, LDC_EVENT_UP);
		port->rx_event = 0;
		port->stats.event_up++;
		return 0;
	}

	err = 0;
	tx_wakeup = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		if (port->napi_resume) {
			struct vio_dring_data *pkt =
				(struct vio_dring_data *)&msgbuf;
			struct vio_dring_state *dr =
				&port->vio.drings[VIO_DRIVER_RX_RING];

			pkt->tag.type = VIO_TYPE_DATA;
			pkt->tag.stype = VIO_SUBTYPE_INFO;
			pkt->tag.stype_env = VIO_DRING_DATA;
			pkt->seq = dr->rcv_nxt;
			pkt->start_idx = vio_dring_next(dr,
							port->napi_stop_idx);
			pkt->end_idx = -1;
		} else {
			err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
			if (unlikely(err < 0)) {
				if (err == -ECONNRESET)
					vio_conn_reset(vio);
				break;
			}
			if (err == 0)
				break;
			viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
			       msgbuf.tag.type,
			       msgbuf.tag.stype,
			       msgbuf.tag.stype_env,
			       msgbuf.tag.sid);
			err = vio_validate_sid(vio, &msgbuf.tag);
			if (err < 0)
				break;
		}

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
				if (!sunvnet_port_is_up_common(port)) {
					/* failures like handshake_failure()
					 * may have cleaned up dring, but
					 * NAPI polling may bring us here.
					 */
					err = -ECONNRESET;
					break;
				}
				err = vnet_rx(port, &msgbuf, &npkts, budget);
				if (npkts >= budget)
					break;
				if (npkts == 0)
					break;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
				err = vnet_ack(port, &msgbuf);
				if (err > 0)
					tx_wakeup |= err;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
				err = vnet_nack(port, &msgbuf);
			}
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
				err = handle_mcast(port, &msgbuf);
			else
				err = vio_control_pkt_engine(vio, &msgbuf);
			if (err)
				break;
		} else {
			err = vnet_handle_unknown(port, &msgbuf);
		}
		if (err == -ECONNRESET)
			break;
	}
	if (unlikely(tx_wakeup && err != -ECONNRESET))
		maybe_tx_wakeup(port);
	return npkts;
}

int sunvnet_poll_common(struct napi_struct *napi, int budget)
{
	struct vnet_port *port = container_of(napi, struct vnet_port, napi);
	struct vio_driver_state *vio = &port->vio;
	int processed = vnet_event_napi(port, budget);

	if (processed < budget) {
		napi_complete_done(napi, processed);
		port->rx_event &= ~LDC_EVENT_DATA_READY;
		vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
	}
	return processed;
}
EXPORT_SYMBOL_GPL(sunvnet_poll_common);

void sunvnet_event_common(void *arg, int event)
{
	struct vnet_port *port = arg;
	struct vio_driver_state *vio = &port->vio;

	port->rx_event |= event;
	vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
	napi_schedule(&port->napi);
}
EXPORT_SYMBOL_GPL(sunvnet_event_common);
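
/*
 * NAPI flow (sketch of the two functions above): sunvnet_event_common()
 * records the LDC event, masks the RX interrupt and schedules the poller;
 * sunvnet_poll_common() then drains up to @budget packets through
 * vnet_event_napi() and, once it processes fewer than @budget, completes
 * the poll and re-enables the interrupt.
 */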

static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_INFO,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= start,
		.end_idx		= (u32)-1,
	};
	int err, delay;
	int retries = 0;

	if (port->stop_rx) {
		trace_vnet_tx_pending_stopped_ack(port->vio._local_sid,
						  port->vio._peer_sid,
						  port->stop_rx_idx, -1);
		err = vnet_send_ack(port,
				    &port->vio.drings[VIO_DRIVER_RX_RING],
				    port->stop_rx_idx, -1,
				    VIO_DRING_STOPPED);
		if (err <= 0)
			return err;
	}

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);
	trace_vnet_tx_trigger(port->vio._local_sid,
			      port->vio._peer_sid, start, err);

	return err;
}

static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
					  unsigned *pending)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *skb = NULL;
	int i, txi;

	*pending = 0;

	txi = dr->prod;
	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
		struct vio_net_desc *d;

		--txi;
		if (txi < 0)
			txi = VNET_TX_RING_SIZE - 1;

		d = vio_dring_entry(dr, txi);

		if (d->hdr.state == VIO_DESC_READY) {
			(*pending)++;
			continue;
		}
		if (port->tx_bufs[txi].skb) {
			if (d->hdr.state != VIO_DESC_DONE)
				pr_notice("invalid ring buffer state %d\n",
					  d->hdr.state);
			BUG_ON(port->tx_bufs[txi].skb->next);

			port->tx_bufs[txi].skb->next = skb;
			skb = port->tx_bufs[txi].skb;
			port->tx_bufs[txi].skb = NULL;

			ldc_unmap(port->vio.lp,
				  port->tx_bufs[txi].cookies,
				  port->tx_bufs[txi].ncookies);
		} else if (d->hdr.state == VIO_DESC_FREE) {
			break;
		}
		d->hdr.state = VIO_DESC_FREE;
	}
	return skb;
}
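
/*
 * TX descriptor lifecycle (sketch of the logic above): vnet_start_xmit()
 * maps an skb and marks its entry VIO_DESC_READY; the peer sets
 * VIO_DESC_DONE once it has consumed the data; vnet_clean_tx_ring() then
 * unmaps the LDC cookies, chains the skb onto a free list for the caller
 * and resets the entry to VIO_DESC_FREE for reuse.
 */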

static inline void vnet_free_skbs(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		skb->next = NULL;
		dev_kfree_skb(skb);
		skb = next;
	}
}

void sunvnet_clean_timer_expire_common(struct timer_list *t)
{
	struct vnet_port *port = from_timer(port, t, clean_timer);
	struct sk_buff *freeskbs;
	unsigned pending;

	netif_tx_lock(VNET_PORT_TO_NET_DEVICE(port));
	freeskbs = vnet_clean_tx_ring(port, &pending);
	netif_tx_unlock(VNET_PORT_TO_NET_DEVICE(port));

	vnet_free_skbs(freeskbs);

	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else
		del_timer(&port->clean_timer);
}
EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common);

static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
			       struct ldc_trans_cookie *cookies, int ncookies,
			       unsigned int map_perm)
{
	int i, nc, err, blen;

	/* header */
	blen = skb_headlen(skb);
	if (blen < ETH_ZLEN)
		blen = ETH_ZLEN;
	blen += VNET_PACKET_SKIP;
	blen += 8 - (blen & 7);

	err = ldc_map_single(lp, skb->data - VNET_PACKET_SKIP, blen, cookies,
			     ncookies, map_perm);
	if (err < 0)
		return err;
	nc = err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		if (nc < ncookies) {
			vaddr = kmap_atomic(skb_frag_page(f));
			blen = skb_frag_size(f);
			blen += 8 - (blen & 7);
			err = ldc_map_single(lp, vaddr + f->page_offset,
					     blen, cookies + nc, ncookies - nc,
					     map_perm);
			kunmap_atomic(vaddr);
		} else {
			err = -EMSGSIZE;
		}

		if (err < 0) {
			ldc_unmap(lp, cookies, nc);
			return err;
		}
		nc += err;
	}
	return nc;
}
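
/*
 * Worked example (illustrative only): for a 60-byte header with
 * VNET_PACKET_SKIP == 6, blen = 60 + 6 = 66, and 66 + (8 - (66 & 7)) = 72,
 * an 8-byte multiple as the LDC copy interface requires. (When blen is
 * already a multiple of 8 the expression adds a full 8 bytes of padding;
 * vnet_skb_shape() below guarantees the room for it.)
 */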

static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
{
	struct sk_buff *nskb;
	int i, len, pad, docopy;

	len = skb->len;
	pad = 0;
	if (len < ETH_ZLEN) {
		pad += ETH_ZLEN - skb->len;
		len += pad;
	}
	len += VNET_PACKET_SKIP;
	pad += 8 - (len & 7);

	/* make sure we have enough cookies and alignment in every frag */
	docopy = skb_shinfo(skb)->nr_frags >= ncookies;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		docopy |= f->page_offset & 7;
	}
	if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
	    skb_tailroom(skb) < pad ||
	    skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
		int start = 0, offset;
		__wsum csum;

		len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
		nskb = alloc_and_align_skb(skb->dev, len);
		if (!nskb) {
			dev_kfree_skb(skb);
			return NULL;
		}
		skb_reserve(nskb, VNET_PACKET_SKIP);

		nskb->protocol = skb->protocol;
		offset = skb_mac_header(skb) - skb->data;
		skb_set_mac_header(nskb, offset);
		offset = skb_network_header(skb) - skb->data;
		skb_set_network_header(nskb, offset);
		offset = skb_transport_header(skb) - skb->data;
		skb_set_transport_header(nskb, offset);

		offset = 0;
		nskb->csum_offset = skb->csum_offset;
		nskb->ip_summed = skb->ip_summed;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			start = skb_checksum_start_offset(skb);
		if (start) {
			int offset = start + nskb->csum_offset;

			/* copy the headers, no csum here */
			if (skb_copy_bits(skb, 0, nskb->data, start)) {
				dev_kfree_skb(nskb);
				dev_kfree_skb(skb);
				return NULL;
			}

			/* copy the rest, with csum calculation */
			*(__sum16 *)(skb->data + offset) = 0;
			csum = skb_copy_and_csum_bits(skb, start,
						      nskb->data + start,
						      skb->len - start, 0);

			/* add in the header checksums */
			if (skb->protocol == htons(ETH_P_IP)) {
				struct iphdr *iph = ip_hdr(nskb);

				if (iph->protocol == IPPROTO_TCP ||
				    iph->protocol == IPPROTO_UDP) {
					csum = csum_tcpudp_magic(iph->saddr,
								 iph->daddr,
								 skb->len - start,
								 iph->protocol,
								 csum);
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				struct ipv6hdr *ip6h = ipv6_hdr(nskb);

				if (ip6h->nexthdr == IPPROTO_TCP ||
				    ip6h->nexthdr == IPPROTO_UDP) {
					csum = csum_ipv6_magic(&ip6h->saddr,
							       &ip6h->daddr,
							       skb->len - start,
							       ip6h->nexthdr,
							       csum);
				}
			}

			/* save the final result */
			*(__sum16 *)(nskb->data + offset) = csum;

			nskb->ip_summed = CHECKSUM_NONE;
		} else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
			dev_kfree_skb(nskb);
			dev_kfree_skb(skb);
			return NULL;
		}
		(void)skb_put(nskb, skb->len);
		if (skb_is_gso(skb)) {
			skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
			skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
		}
		nskb->queue_mapping = skb->queue_mapping;
		dev_kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
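
/*
 * Note (illustrative): vnet_skb_shape() copies into a freshly aligned skb
 * only when it must -- when skb->data is not VNET_PACKET_SKIP bytes past
 * an 8-byte boundary, when head/tail room cannot absorb the required
 * padding, or when the frags would need more LDC cookies than the caller
 * can map. Well-shaped skbs are passed through untouched.
 */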

static netdev_tx_t
vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
		     struct vnet_port *(*vnet_tx_port)
		     (struct sk_buff *, struct net_device *))
{
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *segs;
	int maclen, datalen;
	int status;
	int gso_size, gso_type, gso_segs;
	int hlen = skb_transport_header(skb) - skb_mac_header(skb);
	int proto = IPPROTO_IP;

	if (skb->protocol == htons(ETH_P_IP))
		proto = ip_hdr(skb)->protocol;
	else if (skb->protocol == htons(ETH_P_IPV6))
		proto = ipv6_hdr(skb)->nexthdr;

	if (proto == IPPROTO_TCP) {
		hlen += tcp_hdr(skb)->doff * 4;
	} else if (proto == IPPROTO_UDP) {
		hlen += sizeof(struct udphdr);
	} else {
		pr_err("vnet_handle_offloads GSO with unknown transport protocol %d tproto %d\n",
		       ntohs(skb->protocol), proto);
		hlen = 128; /* XXX */
	}
	datalen = port->tsolen - hlen;

	gso_size = skb_shinfo(skb)->gso_size;
	gso_type = skb_shinfo(skb)->gso_type;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (port->tso && gso_size < datalen)
		gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);

	if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(dev, port->q_index);
		netif_tx_stop_queue(txq);
		if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
			return NETDEV_TX_BUSY;
		netif_tx_wake_queue(txq);
	}

	maclen = skb_network_header(skb) - skb_mac_header(skb);
	skb_pull(skb, maclen);

	if (port->tso && gso_size < datalen) {
		if (skb_unclone(skb, GFP_ATOMIC))
			goto out_dropped;

		/* segment to TSO size */
		skb_shinfo(skb)->gso_size = datalen;
		skb_shinfo(skb)->gso_segs = gso_segs;
	}
	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto out_dropped;

	skb_push(skb, maclen);
	skb_reset_mac_header(skb);

	status = 0;
	while (segs) {
		struct sk_buff *curr = segs;

		segs = segs->next;
		curr->next = NULL;
		if (port->tso && curr->len > dev->mtu) {
			skb_shinfo(curr)->gso_size = gso_size;
			skb_shinfo(curr)->gso_type = gso_type;
			skb_shinfo(curr)->gso_segs =
				DIV_ROUND_UP(curr->len - hlen, gso_size);
		} else {
			skb_shinfo(curr)->gso_size = 0;
		}

		skb_push(curr, maclen);
		skb_reset_mac_header(curr);
		memcpy(skb_mac_header(curr), skb_mac_header(skb),
		       maclen);
		curr->csum_start = skb_transport_header(curr) - curr->head;
		if (ip_hdr(curr)->protocol == IPPROTO_TCP)
			curr->csum_offset = offsetof(struct tcphdr, check);
		else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
			curr->csum_offset = offsetof(struct udphdr, check);

		if (!(status & NETDEV_TX_MASK))
			status = sunvnet_start_xmit_common(curr, dev,
							   vnet_tx_port);
		if (status & NETDEV_TX_MASK)
			dev_kfree_skb_any(curr);
	}

	if (!(status & NETDEV_TX_MASK))
		dev_kfree_skb_any(skb);
	return status;
out_dropped:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
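
/*
 * Worked example (illustrative only): with port->tsolen == 8192 and a TCP
 * flow whose headers take hlen == 54 bytes (14 ethernet + 20 IP + 20 TCP),
 * datalen == 8138; a 64KB GSO skb is therefore re-segmented into
 * DIV_ROUND_UP(65536 - 54, 8138) == 9 TSO-sized chunks, each handed to
 * sunvnet_start_xmit_common() in turn.
 */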
1324
1325 netdev_tx_t
1326 sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
1327                           struct vnet_port *(*vnet_tx_port)
1328                           (struct sk_buff *, struct net_device *))
1329 {
1330         struct vnet_port *port = NULL;
1331         struct vio_dring_state *dr;
1332         struct vio_net_desc *d;
1333         unsigned int len;
1334         struct sk_buff *freeskbs = NULL;
1335         int i, err, txi;
1336         unsigned pending = 0;
1337         struct netdev_queue *txq;
1338
1339         rcu_read_lock();
1340         port = vnet_tx_port(skb, dev);
1341         if (unlikely(!port))
1342                 goto out_dropped;
1343
1344         if (skb_is_gso(skb) && skb->len > port->tsolen) {
1345                 err = vnet_handle_offloads(port, skb, vnet_tx_port);
1346                 rcu_read_unlock();
1347                 return err;
1348         }
1349
1350         if (!skb_is_gso(skb) && skb->len > port->rmtu) {
1351                 unsigned long localmtu = port->rmtu - ETH_HLEN;
1352
1353                 if (vio_version_after_eq(&port->vio, 1, 3))
1354                         localmtu -= VLAN_HLEN;
1355
1356                 if (skb->protocol == htons(ETH_P_IP)) {
1357                         struct flowi4 fl4;
1358                         struct rtable *rt = NULL;
1359
1360                         memset(&fl4, 0, sizeof(fl4));
1361                         fl4.flowi4_oif = dev->ifindex;
1362                         fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
1363                         fl4.daddr = ip_hdr(skb)->daddr;
1364                         fl4.saddr = ip_hdr(skb)->saddr;
1365
1366                         rt = ip_route_output_key(dev_net(dev), &fl4);
1367                         if (!IS_ERR(rt)) {
1368                                 skb_dst_set(skb, &rt->dst);
1369                                 icmp_send(skb, ICMP_DEST_UNREACH,
1370                                           ICMP_FRAG_NEEDED,
1371                                           htonl(localmtu));
1372                         }
1373                 }
1374 #if IS_ENABLED(CONFIG_IPV6)
1375                 else if (skb->protocol == htons(ETH_P_IPV6))
1376                         icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
1377 #endif
1378                 goto out_dropped;
1379         }
1380
1381         skb = vnet_skb_shape(skb, 2);
1382
1383         if (unlikely(!skb))
1384                 goto out_dropped;
1385
1386         if (skb->ip_summed == CHECKSUM_PARTIAL) {
1387                 if (skb->protocol == htons(ETH_P_IP))
1388                         vnet_fullcsum_ipv4(skb);
1389 #if IS_ENABLED(CONFIG_IPV6)
1390                 else if (skb->protocol == htons(ETH_P_IPV6))
1391                         vnet_fullcsum_ipv6(skb);
1392 #endif
1393         }
1394
1395         dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1396         i = skb_get_queue_mapping(skb);
1397         txq = netdev_get_tx_queue(dev, i);
1398         if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
1399                 if (!netif_tx_queue_stopped(txq)) {
1400                         netif_tx_stop_queue(txq);
1401
1402                         /* This is a hard error, log it. */
1403                         netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
1404                         dev->stats.tx_errors++;
1405                 }
1406                 rcu_read_unlock();
1407                 return NETDEV_TX_BUSY;
1408         }
1409
        d = vio_dring_cur(dr);

        txi = dr->prod;

        freeskbs = vnet_clean_tx_ring(port, &pending);

        BUG_ON(port->tx_bufs[txi].skb);

        len = skb->len;
        if (len < ETH_ZLEN)
                len = ETH_ZLEN;

        err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
                           (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
        if (err < 0) {
                netdev_info(dev, "tx buffer map error %d\n", err);
                goto out_dropped;
        }

        port->tx_bufs[txi].skb = skb;
        skb = NULL;
        port->tx_bufs[txi].ncookies = err;

        /* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
         * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
         * the protocol itself does not require it as long as the peer
         * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
         *
         * An ACK for every packet in the ring is expensive as the
         * sending of LDC messages is slow and affects performance.
         */
        d->hdr.ack = VIO_ACK_DISABLE;
        d->size = len;
        d->ncookies = port->tx_bufs[txi].ncookies;
        for (i = 0; i < d->ncookies; i++)
                d->cookies[i] = port->tx_bufs[txi].cookies[i];
        if (vio_version_after_eq(&port->vio, 1, 7)) {
                struct vio_net_dext *dext = vio_net_ext(d);

                memset(dext, 0, sizeof(*dext));
                if (skb_is_gso(port->tx_bufs[txi].skb)) {
                        dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
                                             ->gso_size;
                        dext->flags |= VNET_PKT_IPV4_LSO;
                }
                if (vio_version_after_eq(&port->vio, 1, 8) &&
                    !port->switch_port) {
                        dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
                        dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
                }
        }

        /* This has to be a non-SMP write barrier because we are writing
         * to memory which is shared with the peer LDOM.
         */
        dma_wmb();

        d->hdr.state = VIO_DESC_READY;

        /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
         * to notify the consumer that some descriptors are READY.
         * After that "start" trigger, no additional triggers are needed until
         * a DRING_STOPPED is received from the consumer. The dr->cons field
         * (set up by vnet_ack()) has the value of the next dring index
         * that has not yet been ack-ed. We send a "start" trigger here
         * if, and only if, start_cons is true (reset it afterward). Conversely,
         * vnet_ack() should check if the dring corresponding to cons
         * is marked READY, but start_cons was false.
         * If so, vnet_ack() should send out the missed "start" trigger.
         *
         * Note that the dma_wmb() above makes sure the cookies et al. are
         * not globally visible before the VIO_DESC_READY, and that the
         * stores are ordered correctly by the compiler. The consumer will
         * not proceed until the VIO_DESC_READY is visible, ensuring that
         * the consumer does not observe anything related to descriptors
         * out of order. The HV trap from the LDC start trigger is the
         * producer-to-consumer announcement that work is available to the
         * consumer.
         */
        if (!port->start_cons) { /* previous trigger suffices */
                trace_vnet_skip_tx_trigger(port->vio._local_sid,
                                           port->vio._peer_sid, dr->cons);
                goto ldc_start_done;
        }

        err = __vnet_tx_trigger(port, dr->cons);
        if (unlikely(err < 0)) {
                netdev_info(dev, "TX trigger error %d\n", err);
                d->hdr.state = VIO_DESC_FREE;
                skb = port->tx_bufs[txi].skb;
                port->tx_bufs[txi].skb = NULL;
                dev->stats.tx_carrier_errors++;
                goto out_dropped;
        }

ldc_start_done:
        port->start_cons = false;

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;
        port->stats.tx_packets++;
        port->stats.tx_bytes += port->tx_bufs[txi].skb->len;

        dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
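        /* If this descriptor consumed the last free slot, stop the queue,
         * then re-check: the ack/reclaim path may have freed entries
         * between our availability check and the stop, in which case it
         * saw a running queue and issued no wakeup.  The smp_rmb() orders
         * the re-read of the ring state after the queue-stop.
         */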
        if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
                netif_tx_stop_queue(txq);
                smp_rmb();
                if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
                        netif_tx_wake_queue(txq);
        }

        (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
        rcu_read_unlock();

        vnet_free_skbs(freeskbs);

        return NETDEV_TX_OK;

out_dropped:
        if (pending)
                (void)mod_timer(&port->clean_timer,
                                jiffies + VNET_CLEAN_TIMEOUT);
        else if (port)
                del_timer(&port->clean_timer);
        rcu_read_unlock();
        if (skb)
                dev_kfree_skb(skb);
        vnet_free_skbs(freeskbs);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common);
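
/* A minimal sketch of how a driver is expected to wire this helper up as
 * its ndo_start_xmit, assuming a port-selection callback of its own (the
 * names vnet_start_xmit and vnet_tx_port_find below are illustrative,
 * not definitions made by this file):
 *
 *      static netdev_tx_t vnet_start_xmit(struct sk_buff *skb,
 *                                         struct net_device *dev)
 *      {
 *              return sunvnet_start_xmit_common(skb, dev,
 *                                               vnet_tx_port_find);
 *      }
 */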

void sunvnet_tx_timeout_common(struct net_device *dev)
{
        /* XXX Implement me XXX */
}
EXPORT_SYMBOL_GPL(sunvnet_tx_timeout_common);

int sunvnet_open_common(struct net_device *dev)
{
        netif_carrier_on(dev);
        netif_tx_start_all_queues(dev);

        return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_open_common);

int sunvnet_close_common(struct net_device *dev)
{
        netif_tx_stop_all_queues(dev);
        netif_carrier_off(dev);

        return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_close_common);

static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
{
        struct vnet_mcast_entry *m;

        for (m = vp->mcast_list; m; m = m->next) {
                if (ether_addr_equal(m->addr, addr))
                        return m;
        }
        return NULL;
}

static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
        struct netdev_hw_addr *ha;

        netdev_for_each_mc_addr(ha, dev) {
                struct vnet_mcast_entry *m;

                m = __vnet_mc_find(vp, ha->addr);
                if (m) {
                        m->hit = 1;
                        continue;
                }

                m = kzalloc(sizeof(*m), GFP_ATOMIC);
                if (!m)
                        continue;
                memcpy(m->addr, ha->addr, ETH_ALEN);
                m->hit = 1;

                m->next = vp->mcast_list;
                vp->mcast_list = m;
        }
}

static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
{
        struct vio_net_mcast_info info;
        struct vnet_mcast_entry *m, **pp;
        int n_addrs;

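        /* Two passes over vp->mcast_list, batching up to VNET_NUM_MCAST
         * addresses per VNET_MCAST_INFO message: first announce (set=1)
         * every entry not yet sent, then retract (set=0) and free the
         * entries that were not re-hit by the latest __update_mc_list().
         */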
        memset(&info, 0, sizeof(info));

        info.tag.type = VIO_TYPE_CTRL;
        info.tag.stype = VIO_SUBTYPE_INFO;
        info.tag.stype_env = VNET_MCAST_INFO;
        info.tag.sid = vio_send_sid(&port->vio);
        info.set = 1;

        n_addrs = 0;
        for (m = vp->mcast_list; m; m = m->next) {
                if (m->sent)
                        continue;
                m->sent = 1;
                memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
                       m->addr, ETH_ALEN);
                if (++n_addrs == VNET_NUM_MCAST) {
                        info.count = n_addrs;

                        (void)vio_ldc_send(&port->vio, &info,
                                           sizeof(info));
                        n_addrs = 0;
                }
        }
        if (n_addrs) {
                info.count = n_addrs;
                (void)vio_ldc_send(&port->vio, &info, sizeof(info));
        }

        info.set = 0;

        n_addrs = 0;
        pp = &vp->mcast_list;
        while ((m = *pp) != NULL) {
                if (m->hit) {
                        m->hit = 0;
                        pp = &m->next;
                        continue;
                }

                memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
                       m->addr, ETH_ALEN);
                if (++n_addrs == VNET_NUM_MCAST) {
                        info.count = n_addrs;
                        (void)vio_ldc_send(&port->vio, &info,
                                           sizeof(info));
                        n_addrs = 0;
                }

                *pp = m->next;
                kfree(m);
        }
        if (n_addrs) {
                info.count = n_addrs;
                (void)vio_ldc_send(&port->vio, &info, sizeof(info));
        }
}

void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp)
{
        struct vnet_port *port;

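        /* Multicast state is programmed only through the switch port,
         * so push the current list to the first switch port found and
         * stop.
         */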
        rcu_read_lock();
        list_for_each_entry_rcu(port, &vp->port_list, list) {
                if (port->switch_port) {
                        __update_mc_list(vp, dev);
                        __send_mc_list(vp, port);
                        break;
                }
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common);

int sunvnet_set_mac_addr_common(struct net_device *dev, void *p)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(sunvnet_set_mac_addr_common);

void sunvnet_port_free_tx_bufs_common(struct vnet_port *port)
{
        struct vio_dring_state *dr;
        int i;

        dr = &port->vio.drings[VIO_DRIVER_TX_RING];

        if (!dr->base)
                return;

        for (i = 0; i < VNET_TX_RING_SIZE; i++) {
                struct vio_net_desc *d;
                struct sk_buff *skb = port->tx_bufs[i].skb;

                if (!skb)
                        continue;

                d = vio_dring_entry(dr, i);

                ldc_unmap(port->vio.lp,
                          port->tx_bufs[i].cookies,
                          port->tx_bufs[i].ncookies);
                dev_kfree_skb(skb);
                port->tx_bufs[i].skb = NULL;
                d->hdr.state = VIO_DESC_FREE;
        }
        ldc_free_exp_dring(port->vio.lp, dr->base,
                           (dr->entry_size * dr->num_entries),
                           dr->cookies, dr->ncookies);
        dr->base = NULL;
        dr->entry_size = 0;
        dr->num_entries = 0;
        dr->pending = 0;
        dr->ncookies = 0;
}
EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common);

void vnet_port_reset(struct vnet_port *port)
{
        del_timer(&port->clean_timer);
        sunvnet_port_free_tx_bufs_common(port);
        port->rmtu = 0;
        port->tso = (port->vsw == 0);  /* no tso in vsw, misbehaves in bridge */
        port->tsolen = 0;
}
EXPORT_SYMBOL_GPL(vnet_port_reset);

static int vnet_port_alloc_tx_ring(struct vnet_port *port)
{
        struct vio_dring_state *dr;
        unsigned long len, elen;
        int i, err, ncookies;
        void *dring;

        dr = &port->vio.drings[VIO_DRIVER_TX_RING];

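        /* Each ring entry holds the descriptor itself plus room for two
         * inline transfer cookies; protocol versions >= 1.7 additionally
         * carry the vio_net_dext extension used for the LSO and checksum
         * flags set up in sunvnet_start_xmit_common().
         */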
        elen = sizeof(struct vio_net_desc) +
               sizeof(struct ldc_trans_cookie) * 2;
        if (vio_version_after_eq(&port->vio, 1, 7))
                elen += sizeof(struct vio_net_dext);
        len = VNET_TX_RING_SIZE * elen;

        ncookies = VIO_MAX_RING_COOKIES;
        dring = ldc_alloc_exp_dring(port->vio.lp, len,
                                    dr->cookies, &ncookies,
                                    (LDC_MAP_SHADOW |
                                     LDC_MAP_DIRECT |
                                     LDC_MAP_RW));
        if (IS_ERR(dring)) {
                err = PTR_ERR(dring);
                goto err_out;
        }

        dr->base = dring;
        dr->entry_size = elen;
        dr->num_entries = VNET_TX_RING_SIZE;
        dr->prod = 0;
        dr->cons = 0;
        port->start_cons = true; /* need an initial trigger */
        dr->pending = VNET_TX_RING_SIZE;
        dr->ncookies = ncookies;

        for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
                struct vio_net_desc *d;

                d = vio_dring_entry(dr, i);
                d->hdr.state = VIO_DESC_FREE;
        }
        return 0;

err_out:
        sunvnet_port_free_tx_bufs_common(port);

        return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp)
{
        struct vnet_port *port;
        unsigned long flags;

        spin_lock_irqsave(&vp->lock, flags);
        if (!list_empty(&vp->port_list)) {
                port = list_entry(vp->port_list.next, struct vnet_port, list);
                napi_schedule(&port->napi);
        }
        spin_unlock_irqrestore(&vp->lock, flags);
}
EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common);
#endif

void sunvnet_port_add_txq_common(struct vnet_port *port)
{
        struct vnet *vp = port->vp;
        int smallest = 0;
        int i;

        /* Find the first unused queue, or failing that the least-used
         * one.  When there are more ldoms than queues, we start to
         * double up on ports per queue.
         */
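        /* Worked example (assuming VNET_MAX_TXQS is 16): the first 16
         * ports each get their own queue, 0..15; the 17th port then
         * shares queue 0, the 18th shares queue 1, and so on.
         */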
        for (i = 0; i < VNET_MAX_TXQS; i++) {
                if (vp->q_used[i] == 0) {
                        smallest = i;
                        break;
                }
                if (vp->q_used[i] < vp->q_used[smallest])
                        smallest = i;
        }

        vp->nports++;
        vp->q_used[smallest]++;
        port->q_index = smallest;
}
EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common);

void sunvnet_port_rm_txq_common(struct vnet_port *port)
{
        port->vp->nports--;
        port->vp->q_used[port->q_index]--;
        port->q_index = 0;
}
EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common);