drivers/net/xen-netback/netback.c
1 /*
2  * Back-end of the driver for virtual network devices. This portion of the
3  * driver exports a 'unified' network-device interface that can be accessed
4  * by any operating system that implements a compatible front end. A
5  * reference front-end implementation can be found in:
6  *  drivers/net/xen-netfront.c
7  *
8  * Copyright (c) 2002-2005, K A Fraser
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License version 2
12  * as published by the Free Software Foundation; or, when distributed
13  * separately from the Linux kernel or incorporated into other
14  * software packages, subject to the following license:
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a copy
17  * of this source file (the "Software"), to deal in the Software without
18  * restriction, including without limitation the rights to use, copy, modify,
19  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20  * and to permit persons to whom the Software is furnished to do so, subject to
21  * the following conditions:
22  *
23  * The above copyright notice and this permission notice shall be included in
24  * all copies or substantial portions of the Software.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32  * IN THE SOFTWARE.
33  */
34
35 #include "common.h"
36
37 #include <linux/kthread.h>
38 #include <linux/if_vlan.h>
39 #include <linux/udp.h>
40 #include <linux/highmem.h>
41
42 #include <net/tcp.h>
43
44 #include <xen/xen.h>
45 #include <xen/events.h>
46 #include <xen/interface/memory.h>
47
48 #include <asm/xen/hypercall.h>
49 #include <asm/xen/page.h>
50
51 /* Provide an option to disable split event channels at load time as
52  * event channels are a limited resource. Split event channels are
53  * enabled by default.
54  */
55 bool separate_tx_rx_irq = true;
56 module_param(separate_tx_rx_irq, bool, 0644);
57
58 /* When the guest ring fills up, the qdisc queues packets for us, but we have
59  * to time them out; otherwise other guests' packets can get stuck there.
60  */
61 unsigned int rx_drain_timeout_msecs = 10000;
62 module_param(rx_drain_timeout_msecs, uint, 0444);
63 unsigned int rx_drain_timeout_jiffies;
64
65 unsigned int xenvif_max_queues;
66 module_param_named(max_queues, xenvif_max_queues, uint, 0644);
67 MODULE_PARM_DESC(max_queues,
68                  "Maximum number of queues per virtual interface");
69
70 /*
71  * This is the maximum number of slots an skb can use. If a guest sends
72  * an skb which exceeds this limit it is considered malicious.
73  */
74 #define FATAL_SKB_SLOTS_DEFAULT 20
75 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
76 module_param(fatal_skb_slots, uint, 0444);
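
/* Usage sketch (an illustration, not part of the driver): the knobs above
 * are ordinary module parameters, so they can be set at load time, e.g.
 *
 *   modprobe xen-netback separate_tx_rx_irq=0 max_queues=4
 *
 * The parameters registered with mode 0644 can also be changed at runtime
 * through /sys/module/xen_netback/parameters/.
 */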
77
78 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
79                                u8 status);
80
81 static void make_tx_response(struct xenvif_queue *queue,
82                              struct xen_netif_tx_request *txp,
83                              s8       st);
84
85 static inline int tx_work_todo(struct xenvif_queue *queue);
86 static inline int rx_work_todo(struct xenvif_queue *queue);
87
88 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
89                                              u16      id,
90                                              s8       st,
91                                              u16      offset,
92                                              u16      size,
93                                              u16      flags);
94
95 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
96                                        u16 idx)
97 {
98         return page_to_pfn(queue->mmap_pages[idx]);
99 }
100
101 static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
102                                          u16 idx)
103 {
104         return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
105 }
106
107 #define callback_param(vif, pending_idx) \
108         (vif->pending_tx_info[pending_idx].callback_struct)
109
110 /* Find the containing queue's structure from a pointer into its pending_tx_info array
111  */
112 static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
113 {
114         u16 pending_idx = ubuf->desc;
115         struct pending_tx_info *temp =
116                 container_of(ubuf, struct pending_tx_info, callback_struct);
117         return container_of(temp - pending_idx,
118                             struct xenvif_queue,
119                             pending_tx_info[0]);
120 }
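
/* A sketch of the pointer arithmetic above, relying on the fact that
 * pending_tx_info[] is indexed by pending_idx:
 *
 *   struct pending_tx_info *base = temp - pending_idx;  // element 0
 *   struct xenvif_queue *q = container_of(base, struct xenvif_queue,
 *                                         pending_tx_info[0]);
 *
 * i.e. stepping back pending_idx elements lands on pending_tx_info[0],
 * from which container_of() recovers the enclosing queue.
 */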
121
122 /* This is the minimum size for the linear area to avoid lots of
123  * calls to __pskb_pull_tail() as we set up checksum offsets. The
124  * value 128 was chosen as it covers all IPv4 and most likely
125  * IPv6 headers.
126  */
127 #define PKT_PROT_LEN 128
128
129 static u16 frag_get_pending_idx(skb_frag_t *frag)
130 {
131         return (u16)frag->page_offset;
132 }
133
134 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
135 {
136         frag->page_offset = pending_idx;
137 }
138
139 static inline pending_ring_idx_t pending_index(unsigned i)
140 {
141         return i & (MAX_PENDING_REQS-1);
142 }
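
/* The mask above is only equivalent to a modulo while MAX_PENDING_REQS is a
 * power of two. A minimal compile-time self-check (hypothetical, not part of
 * this driver) could look like:
 */
static inline void pending_index_selfcheck(void)
{
	BUILD_BUG_ON(MAX_PENDING_REQS & (MAX_PENDING_REQS - 1));
}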
143
144 bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
145 {
146         RING_IDX prod, cons;
147
148         do {
149                 prod = queue->rx.sring->req_prod;
150                 cons = queue->rx.req_cons;
151
152                 if (prod - cons >= needed)
153                         return true;
154
155                 queue->rx.sring->req_event = prod + 1;
156
157                 /* Make sure event is visible before we check prod
158                  * again.
159                  */
160                 mb();
161         } while (queue->rx.sring->req_prod != prod);
162
163         return false;
164 }
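
/* The loop above is the usual lock-free rearm-and-recheck pattern: set
 * req_event to prod + 1 so the frontend will raise an event when it posts
 * more requests, then re-read req_prod. If it moved in the meantime, the
 * check is retried rather than risking a lost wakeup.
 */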
165
166 /*
167  * Returns true if we should start a new receive buffer instead of
168  * adding 'size' bytes to a buffer which currently contains 'offset'
169  * bytes.
170  */
171 static bool start_new_rx_buffer(int offset, unsigned long size, int head,
172                                 bool full_coalesce)
173 {
174         /* simple case: we have completely filled the current buffer. */
175         if (offset == MAX_BUFFER_OFFSET)
176                 return true;
177
178         /*
179          * complex case: start a fresh buffer if the current frag
180          * would overflow the current buffer but only if:
181          *     (i)   this frag would fit completely in the next buffer
182          * and (ii)  there is already some data in the current buffer
183          * and (iii) this is not the head buffer.
184          * and (iv)  there is no need to fully utilize the buffers
185          *
186          * Where:
187          * - (i) stops us splitting a frag into two copies
188          *   unless the frag is too large for a single buffer.
189          * - (ii) stops us from leaving a buffer pointlessly empty.
190          * - (iii) stops us leaving the first buffer
191          *   empty. Strictly speaking this is already covered
192          *   by (ii) but is explicitly checked because
193          *   netfront relies on the first buffer being
194          *   non-empty and can crash otherwise.
195          * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
196          *   slots
197          *
198          * This means we will effectively linearise small
199          * frags but do not needlessly split large buffers
200          * into multiple copies, tending to give large frags their
201          * own buffers as before.
202          */
203         BUG_ON(size > MAX_BUFFER_OFFSET);
204         if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
205             !full_coalesce)
206                 return true;
207
208         return false;
209 }
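
/* Worked example, assuming MAX_BUFFER_OFFSET is one 4096-byte page: with
 * offset == 3000 and size == 2000 in a non-head buffer that is not being
 * fully coalesced, 3000 + 2000 > 4096, so we return true and the frag goes
 * whole into a fresh buffer rather than being split 1096/904 across two
 * copy operations.
 */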
210
211 struct netrx_pending_operations {
212         unsigned copy_prod, copy_cons;
213         unsigned meta_prod, meta_cons;
214         struct gnttab_copy *copy;
215         struct xenvif_rx_meta *meta;
216         int copy_off;
217         grant_ref_t copy_gref;
218 };
219
220 static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
221                                                  struct netrx_pending_operations *npo)
222 {
223         struct xenvif_rx_meta *meta;
224         struct xen_netif_rx_request *req;
225
226         req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
227
228         meta = npo->meta + npo->meta_prod++;
229         meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
230         meta->gso_size = 0;
231         meta->size = 0;
232         meta->id = req->id;
233
234         npo->copy_off = 0;
235         npo->copy_gref = req->gref;
236
237         return meta;
238 }
239
240 struct xenvif_rx_cb {
241         int meta_slots_used;
242         bool full_coalesce;
243 };
244
245 #define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
246
247 /*
248  * Set up the grant operations for this fragment. If it's a flipping
249  * interface, we also set up the unmap request from here.
250  */
251 static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
252                                  struct netrx_pending_operations *npo,
253                                  struct page *page, unsigned long size,
254                                  unsigned long offset, int *head,
255                                  struct xenvif_queue *foreign_queue,
256                                  grant_ref_t foreign_gref)
257 {
258         struct gnttab_copy *copy_gop;
259         struct xenvif_rx_meta *meta;
260         unsigned long bytes;
261         int gso_type = XEN_NETIF_GSO_TYPE_NONE;
262
263         /* Data must not cross a page boundary. */
264         BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
265
266         meta = npo->meta + npo->meta_prod - 1;
267
268         /* Skip unused frames from start of page */
269         page += offset >> PAGE_SHIFT;
270         offset &= ~PAGE_MASK;
271
272         while (size > 0) {
273                 BUG_ON(offset >= PAGE_SIZE);
274                 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
275
276                 bytes = PAGE_SIZE - offset;
277
278                 if (bytes > size)
279                         bytes = size;
280
281                 if (start_new_rx_buffer(npo->copy_off,
282                                         bytes,
283                                         *head,
284                                         XENVIF_RX_CB(skb)->full_coalesce)) {
285                         /*
286                          * Netfront requires there to be some data in the head
287                          * buffer.
288                          */
289                         BUG_ON(*head);
290
291                         meta = get_next_rx_buffer(queue, npo);
292                 }
293
294                 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
295                         bytes = MAX_BUFFER_OFFSET - npo->copy_off;
296
297                 copy_gop = npo->copy + npo->copy_prod++;
298                 copy_gop->flags = GNTCOPY_dest_gref;
299                 copy_gop->len = bytes;
300
301                 if (foreign_queue) {
302                         copy_gop->source.domid = foreign_queue->vif->domid;
303                         copy_gop->source.u.ref = foreign_gref;
304                         copy_gop->flags |= GNTCOPY_source_gref;
305                 } else {
306                         copy_gop->source.domid = DOMID_SELF;
307                         copy_gop->source.u.gmfn =
308                                 virt_to_mfn(page_address(page));
309                 }
310                 copy_gop->source.offset = offset;
311
312                 copy_gop->dest.domid = queue->vif->domid;
313                 copy_gop->dest.offset = npo->copy_off;
314                 copy_gop->dest.u.ref = npo->copy_gref;
315
316                 npo->copy_off += bytes;
317                 meta->size += bytes;
318
319                 offset += bytes;
320                 size -= bytes;
321
322                 /* Next frame */
323                 if (offset == PAGE_SIZE && size) {
324                         BUG_ON(!PageCompound(page));
325                         page++;
326                         offset = 0;
327                 }
328
329                 /* Leave a gap for the GSO descriptor. */
330                 if (skb_is_gso(skb)) {
331                         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
332                                 gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
333                         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
334                                 gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
335                 }
336
337                 if (*head && ((1 << gso_type) & queue->vif->gso_mask))
338                         queue->rx.req_cons++;
339
340                 *head = 0; /* There must be something in this buffer now. */
341
342         }
343 }
344
345 /*
346  * Find the grant ref for a given frag in a chain of struct ubuf_info's
347  * skb: the skb itself
348  * i: the frag's number
349  * ubuf: a pointer to an element in the chain. It should not be NULL
350  *
351  * Returns a pointer to the element in the chain where the page was found. If
352  * not found, returns NULL.
353  * See the definition of callback_struct in common.h for more details about
354  * the chain.
355  */
356 static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
357                                                 const int i,
358                                                 const struct ubuf_info *ubuf)
359 {
360         struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);
361
362         do {
363                 u16 pending_idx = ubuf->desc;
364
365                 if (skb_shinfo(skb)->frags[i].page.p ==
366                     foreign_queue->mmap_pages[pending_idx])
367                         break;
368                 ubuf = (struct ubuf_info *) ubuf->ctx;
369         } while (ubuf);
370
371         return ubuf;
372 }
373
374 /*
375  * Prepare an SKB to be transmitted to the frontend.
376  *
377  * This function is responsible for allocating grant operations, meta
378  * structures, etc.
379  *
380  * It returns the number of meta structures consumed. The number of
381  * ring slots used is always equal to the number of meta slots used
382  * plus the number of GSO descriptors used. Currently, we use either
383  * zero GSO descriptors (for non-GSO packets) or one descriptor (for
384  * frontend-side LRO).
385  */
386 static int xenvif_gop_skb(struct sk_buff *skb,
387                           struct netrx_pending_operations *npo,
388                           struct xenvif_queue *queue)
389 {
390         struct xenvif *vif = netdev_priv(skb->dev);
391         int nr_frags = skb_shinfo(skb)->nr_frags;
392         int i;
393         struct xen_netif_rx_request *req;
394         struct xenvif_rx_meta *meta;
395         unsigned char *data;
396         int head = 1;
397         int old_meta_prod;
398         int gso_type;
399         const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
400         const struct ubuf_info *const head_ubuf = ubuf;
401
402         old_meta_prod = npo->meta_prod;
403
404         gso_type = XEN_NETIF_GSO_TYPE_NONE;
405         if (skb_is_gso(skb)) {
406                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
407                         gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
408                 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
409                         gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
410         }
411
412         /* Set up a GSO prefix descriptor, if necessary */
413         if ((1 << gso_type) & vif->gso_prefix_mask) {
414                 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
415                 meta = npo->meta + npo->meta_prod++;
416                 meta->gso_type = gso_type;
417                 meta->gso_size = skb_shinfo(skb)->gso_size;
418                 meta->size = 0;
419                 meta->id = req->id;
420         }
421
422         req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
423         meta = npo->meta + npo->meta_prod++;
424
425         if ((1 << gso_type) & vif->gso_mask) {
426                 meta->gso_type = gso_type;
427                 meta->gso_size = skb_shinfo(skb)->gso_size;
428         } else {
429                 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
430                 meta->gso_size = 0;
431         }
432
433         meta->size = 0;
434         meta->id = req->id;
435         npo->copy_off = 0;
436         npo->copy_gref = req->gref;
437
438         data = skb->data;
439         while (data < skb_tail_pointer(skb)) {
440                 unsigned int offset = offset_in_page(data);
441                 unsigned int len = PAGE_SIZE - offset;
442
443                 if (data + len > skb_tail_pointer(skb))
444                         len = skb_tail_pointer(skb) - data;
445
446                 xenvif_gop_frag_copy(queue, skb, npo,
447                                      virt_to_page(data), len, offset, &head,
448                                      NULL,
449                                      0);
450                 data += len;
451         }
452
453         for (i = 0; i < nr_frags; i++) {
454                 /* This variable also signals whether foreign_gref has a real
455                  * value or not.
456                  */
457                 struct xenvif_queue *foreign_queue = NULL;
458                 grant_ref_t foreign_gref;
459
460                 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
461                         (ubuf->callback == &xenvif_zerocopy_callback)) {
462                         const struct ubuf_info *const startpoint = ubuf;
463
464                         /* Ideally ubuf points to the chain element which
465                          * belongs to this frag. Or if frags were removed from
466                          * the beginning, then shortly before it.
467                          */
468                         ubuf = xenvif_find_gref(skb, i, ubuf);
469
470                         /* Try again from the beginning of the list, if we
471                          * haven't tried from there. This only makes sense in
472                          * the unlikely event of reordering the original frags.
473                          * For injected local pages it's an unnecessary second
474                          * run.
475                          */
476                         if (unlikely(!ubuf) && startpoint != head_ubuf)
477                                 ubuf = xenvif_find_gref(skb, i, head_ubuf);
478
479                         if (likely(ubuf)) {
480                                 u16 pending_idx = ubuf->desc;
481
482                                 foreign_queue = ubuf_to_queue(ubuf);
483                                 foreign_gref =
484                                         foreign_queue->pending_tx_info[pending_idx].req.gref;
485                                 /* Just a safety measure. If this was the last
486                                  * element on the list, the for loop will
487                                  * iterate again if a local page was added to
488                                  * the end. Using head_ubuf here prevents the
489                                  * second search on the chain. Or the original
490                                  * frags changed order, but that's less likely.
491                                  * In any case, ubuf shouldn't be NULL.
492                                  */
493                                 ubuf = ubuf->ctx ?
494                                         (struct ubuf_info *) ubuf->ctx :
495                                         head_ubuf;
496                         } else
497                                 /* This frag was a local page, added to the
498                                  * array after the skb left netback.
499                                  */
500                                 ubuf = head_ubuf;
501                 }
502                 xenvif_gop_frag_copy(queue, skb, npo,
503                                      skb_frag_page(&skb_shinfo(skb)->frags[i]),
504                                      skb_frag_size(&skb_shinfo(skb)->frags[i]),
505                                      skb_shinfo(skb)->frags[i].page_offset,
506                                      &head,
507                                      foreign_queue,
508                                      foreign_queue ? foreign_gref : UINT_MAX);
509         }
510
511         return npo->meta_prod - old_meta_prod;
512 }
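
/* Example of the accounting above: for a vif using gso_mask (not the prefix
 * variant), a TCPV4 GSO skb that consumes three meta slots also consumes one
 * extra ring slot for the GSO descriptor (the req_cons++ in
 * xenvif_gop_frag_copy), so ring slots used == meta slots used + 1, matching
 * the rule stated in the function comment.
 */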
513
514 /*
515  * This is a twin to xenvif_gop_skb.  Assume that xenvif_gop_skb was
516  * used to set up the operations on the top of
517  * netrx_pending_operations, which have since been done.  Check that
518  * they didn't give any errors and advance over them.
519  */
520 static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
521                             struct netrx_pending_operations *npo)
522 {
523         struct gnttab_copy     *copy_op;
524         int status = XEN_NETIF_RSP_OKAY;
525         int i;
526
527         for (i = 0; i < nr_meta_slots; i++) {
528                 copy_op = npo->copy + npo->copy_cons++;
529                 if (copy_op->status != GNTST_okay) {
530                         netdev_dbg(vif->dev,
531                                    "Bad status %d from copy to DOM%d.\n",
532                                    copy_op->status, vif->domid);
533                         status = XEN_NETIF_RSP_ERROR;
534                 }
535         }
536
537         return status;
538 }
539
540 static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
541                                       struct xenvif_rx_meta *meta,
542                                       int nr_meta_slots)
543 {
544         int i;
545         unsigned long offset;
546
547         /* No fragments used */
548         if (nr_meta_slots <= 1)
549                 return;
550
551         nr_meta_slots--;
552
553         for (i = 0; i < nr_meta_slots; i++) {
554                 int flags;
555                 if (i == nr_meta_slots - 1)
556                         flags = 0;
557                 else
558                         flags = XEN_NETRXF_more_data;
559
560                 offset = 0;
561                 make_rx_response(queue, meta[i].id, status, offset,
562                                  meta[i].size, flags);
563         }
564 }
565
566 void xenvif_kick_thread(struct xenvif_queue *queue)
567 {
568         wake_up(&queue->wq);
569 }
570
571 static void xenvif_rx_action(struct xenvif_queue *queue)
572 {
573         s8 status;
574         u16 flags;
575         struct xen_netif_rx_response *resp;
576         struct sk_buff_head rxq;
577         struct sk_buff *skb;
578         LIST_HEAD(notify);
579         int ret;
580         unsigned long offset;
581         bool need_to_notify = false;
582
583         struct netrx_pending_operations npo = {
584                 .copy  = queue->grant_copy_op,
585                 .meta  = queue->meta,
586         };
587
588         skb_queue_head_init(&rxq);
589
590         while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) {
591                 RING_IDX max_slots_needed;
592                 RING_IDX old_req_cons;
593                 RING_IDX ring_slots_used;
594                 int i;
595
596                 /* We need a cheap worst-case estimate for the number of
597                  * slots we'll use.
598                  */
599
600                 max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
601                                                 skb_headlen(skb),
602                                                 PAGE_SIZE);
603                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
604                         unsigned int size;
605                         unsigned int offset;
606
607                         size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
608                         offset = skb_shinfo(skb)->frags[i].page_offset;
609
610                         /* For a worst-case estimate we need to factor in
611                          * the fragment page offset as this will affect the
612                          * number of times xenvif_gop_frag_copy() will
613                          * call start_new_rx_buffer().
614                          */
615                         max_slots_needed += DIV_ROUND_UP(offset + size,
616                                                          PAGE_SIZE);
617                 }
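                /* Worked example (4 KiB pages): a 4000-byte frag at
                 * page_offset 200 can touch two pages, and indeed
                 * DIV_ROUND_UP(200 + 4000, 4096) = 2 slots, whereas ignoring
                 * the offset would underestimate it as
                 * DIV_ROUND_UP(4000, 4096) = 1 slot.
                 */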
618
619                 /* To avoid the estimate becoming too pessimal for some
620                  * frontends that limit posted rx requests, cap the estimate
621                  * at MAX_SKB_FRAGS. In this case netback will fully coalesce
622                  * the skb into the provided slots.
623                  */
624                 if (max_slots_needed > MAX_SKB_FRAGS) {
625                         max_slots_needed = MAX_SKB_FRAGS;
626                         XENVIF_RX_CB(skb)->full_coalesce = true;
627                 } else {
628                         XENVIF_RX_CB(skb)->full_coalesce = false;
629                 }
630
631                 /* We may need one more slot for GSO metadata */
632                 if (skb_is_gso(skb) &&
633                    (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
634                     skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
635                         max_slots_needed++;
636
637                 /* If the skb may not fit then bail out now */
638                 if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) {
639                         skb_queue_head(&queue->rx_queue, skb);
640                         need_to_notify = true;
641                         queue->rx_last_skb_slots = max_slots_needed;
642                         break;
643                 } else
644                         queue->rx_last_skb_slots = 0;
645
646                 old_req_cons = queue->rx.req_cons;
647                 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
648                 ring_slots_used = queue->rx.req_cons - old_req_cons;
649
650                 BUG_ON(ring_slots_used > max_slots_needed);
651
652                 __skb_queue_tail(&rxq, skb);
653         }
654
655         BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
656
657         if (!npo.copy_prod)
658                 goto done;
659
660         BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
661         gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
662
663         while ((skb = __skb_dequeue(&rxq)) != NULL) {
664
665                 if ((1 << queue->meta[npo.meta_cons].gso_type) &
666                     queue->vif->gso_prefix_mask) {
667                         resp = RING_GET_RESPONSE(&queue->rx,
668                                                  queue->rx.rsp_prod_pvt++);
669
670                         resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
671
672                         resp->offset = queue->meta[npo.meta_cons].gso_size;
673                         resp->id = queue->meta[npo.meta_cons].id;
674                         resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
675
676                         npo.meta_cons++;
677                         XENVIF_RX_CB(skb)->meta_slots_used--;
678                 }
679
680
681                 queue->stats.tx_bytes += skb->len;
682                 queue->stats.tx_packets++;
683
684                 status = xenvif_check_gop(queue->vif,
685                                           XENVIF_RX_CB(skb)->meta_slots_used,
686                                           &npo);
687
688                 if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
689                         flags = 0;
690                 else
691                         flags = XEN_NETRXF_more_data;
692
693                 if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
694                         flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
695                 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
696                         /* remote but checksummed. */
697                         flags |= XEN_NETRXF_data_validated;
698
699                 offset = 0;
700                 resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
701                                         status, offset,
702                                         queue->meta[npo.meta_cons].size,
703                                         flags);
704
705                 if ((1 << queue->meta[npo.meta_cons].gso_type) &
706                     queue->vif->gso_mask) {
707                         struct xen_netif_extra_info *gso =
708                                 (struct xen_netif_extra_info *)
709                                 RING_GET_RESPONSE(&queue->rx,
710                                                   queue->rx.rsp_prod_pvt++);
711
712                         resp->flags |= XEN_NETRXF_extra_info;
713
714                         gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
715                         gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
716                         gso->u.gso.pad = 0;
717                         gso->u.gso.features = 0;
718
719                         gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
720                         gso->flags = 0;
721                 }
722
723                 xenvif_add_frag_responses(queue, status,
724                                           queue->meta + npo.meta_cons + 1,
725                                           XENVIF_RX_CB(skb)->meta_slots_used);
726
727                 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
728
729                 need_to_notify |= !!ret;
730
731                 npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
732                 dev_kfree_skb(skb);
733         }
734
735 done:
736         if (need_to_notify)
737                 notify_remote_via_irq(queue->rx_irq);
738 }
739
740 void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
741 {
742         int more_to_do;
743
744         RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
745
746         if (more_to_do)
747                 napi_schedule(&queue->napi);
748 }
749
750 static void tx_add_credit(struct xenvif_queue *queue)
751 {
752         unsigned long max_burst, max_credit;
753
754         /*
755          * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
756          * Otherwise the interface can seize up due to insufficient credit.
757          */
758         max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
759         max_burst = min(max_burst, 131072UL);
760         max_burst = max(max_burst, queue->credit_bytes);
761
762         /* Take care that adding a new chunk of credit doesn't wrap to zero. */
763         max_credit = queue->remaining_credit + queue->credit_bytes;
764         if (max_credit < queue->remaining_credit)
765                 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
766
767         queue->remaining_credit = min(max_credit, max_burst);
768 }
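
/* Worked example: with credit_bytes == 10000 and a 60000-byte request at the
 * head of the ring, max_burst = max(min(60000, 131072), 10000) = 60000, so
 * credit can accumulate across replenish windows up to 60000 bytes; enough
 * to eventually let the large packet through instead of seizing up.
 */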
769
770 static void tx_credit_callback(unsigned long data)
771 {
772         struct xenvif_queue *queue = (struct xenvif_queue *)data;
773         tx_add_credit(queue);
774         xenvif_napi_schedule_or_enable_events(queue);
775 }
776
777 static void xenvif_tx_err(struct xenvif_queue *queue,
778                           struct xen_netif_tx_request *txp, RING_IDX end)
779 {
780         RING_IDX cons = queue->tx.req_cons;
781         unsigned long flags;
782
783         do {
784                 spin_lock_irqsave(&queue->response_lock, flags);
785                 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
786                 spin_unlock_irqrestore(&queue->response_lock, flags);
787                 if (cons == end)
788                         break;
789                 txp = RING_GET_REQUEST(&queue->tx, cons++);
790         } while (1);
791         queue->tx.req_cons = cons;
792 }
793
794 static void xenvif_fatal_tx_err(struct xenvif *vif)
795 {
796         netdev_err(vif->dev, "fatal error; disabling device\n");
797         vif->disabled = true;
798         /* Disable the vif from queue 0's kthread */
799         if (vif->queues)
800                 xenvif_kick_thread(&vif->queues[0]);
801 }
802
803 static int xenvif_count_requests(struct xenvif_queue *queue,
804                                  struct xen_netif_tx_request *first,
805                                  struct xen_netif_tx_request *txp,
806                                  int work_to_do)
807 {
808         RING_IDX cons = queue->tx.req_cons;
809         int slots = 0;
810         int drop_err = 0;
811         int more_data;
812
813         if (!(first->flags & XEN_NETTXF_more_data))
814                 return 0;
815
816         do {
817                 struct xen_netif_tx_request dropped_tx = { 0 };
818
819                 if (slots >= work_to_do) {
820                         netdev_err(queue->vif->dev,
821                                    "Asked for %d slots but exceeds this limit\n",
822                                    work_to_do);
823                         xenvif_fatal_tx_err(queue->vif);
824                         return -ENODATA;
825                 }
826
827                 /* This guest is really using too many slots and is
828                  * considered malicious.
829                  */
830                 if (unlikely(slots >= fatal_skb_slots)) {
831                         netdev_err(queue->vif->dev,
832                                    "Malicious frontend using %d slots, threshold %u\n",
833                                    slots, fatal_skb_slots);
834                         xenvif_fatal_tx_err(queue->vif);
835                         return -E2BIG;
836                 }
837
838                 /* The Xen network protocol had an implicit dependency on
839                  * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
840                  * the historical MAX_SKB_FRAGS value 18 to honor the
841                  * same behavior as before. Any packet using more than
842                  * 18 slots but fewer than fatal_skb_slots slots is
843                  * dropped.
844                  */
845                 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
846                         if (net_ratelimit())
847                                 netdev_dbg(queue->vif->dev,
848                                            "Too many slots (%d) exceeding limit (%d), dropping packet\n",
849                                            slots, XEN_NETBK_LEGACY_SLOTS_MAX);
850                         drop_err = -E2BIG;
851                 }
852
853                 if (drop_err)
854                         txp = &dropped_tx;
855
856                 memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
857                        sizeof(*txp));
858
859                 /* If the guest submitted a frame >= 64 KiB then
860                  * first->size overflowed and following slots will
861                  * appear to be larger than the frame.
862                  *
863                  * This cannot be a fatal error as there are buggy
864                  * frontends that do this.
865                  *
866                  * Consume all slots and drop the packet.
867                  */
868                 if (!drop_err && txp->size > first->size) {
869                         if (net_ratelimit())
870                                 netdev_dbg(queue->vif->dev,
871                                            "Invalid tx request, slot size %u > remaining size %u\n",
872                                            txp->size, first->size);
873                         drop_err = -EIO;
874                 }
875
876                 first->size -= txp->size;
877                 slots++;
878
879                 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
880                         netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
881                                  txp->offset, txp->size);
882                         xenvif_fatal_tx_err(queue->vif);
883                         return -EINVAL;
884                 }
885
886                 more_data = txp->flags & XEN_NETTXF_more_data;
887
888                 if (!drop_err)
889                         txp++;
890
891         } while (more_data);
892
893         if (drop_err) {
894                 xenvif_tx_err(queue, first, cons + slots);
895                 return drop_err;
896         }
897
898         return slots;
899 }
900
901
902 struct xenvif_tx_cb {
903         u16 pending_idx;
904 };
905
906 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
907
908 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
909                                           u16 pending_idx,
910                                           struct xen_netif_tx_request *txp,
911                                           struct gnttab_map_grant_ref *mop)
912 {
913         queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
914         gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
915                           GNTMAP_host_map | GNTMAP_readonly,
916                           txp->gref, queue->vif->domid);
917
918         memcpy(&queue->pending_tx_info[pending_idx].req, txp,
919                sizeof(*txp));
920 }
921
922 static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
923 {
924         struct sk_buff *skb =
925                 alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
926                           GFP_ATOMIC | __GFP_NOWARN);
927         if (unlikely(skb == NULL))
928                 return NULL;
929
930         /* Packets passed to netif_rx() must have some headroom. */
931         skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
932
933         /* Initialize it here to avoid later surprises */
934         skb_shinfo(skb)->destructor_arg = NULL;
935
936         return skb;
937 }
938
939 static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
940                                                         struct sk_buff *skb,
941                                                         struct xen_netif_tx_request *txp,
942                                                         struct gnttab_map_grant_ref *gop)
943 {
944         struct skb_shared_info *shinfo = skb_shinfo(skb);
945         skb_frag_t *frags = shinfo->frags;
946         u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
947         int start;
948         pending_ring_idx_t index;
949         unsigned int nr_slots, frag_overflow = 0;
950
951         /* At this point shinfo->nr_frags is in fact the number of
952          * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
953          */
954         if (shinfo->nr_frags > MAX_SKB_FRAGS) {
955                 frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
956                 BUG_ON(frag_overflow > MAX_SKB_FRAGS);
957                 shinfo->nr_frags = MAX_SKB_FRAGS;
958         }
959         nr_slots = shinfo->nr_frags;
960
961         /* Skip the first skb fragment if it is on the same page as the header fragment. */
962         start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
963
964         for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
965              shinfo->nr_frags++, txp++, gop++) {
966                 index = pending_index(queue->pending_cons++);
967                 pending_idx = queue->pending_ring[index];
968                 xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
969                 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
970         }
971
972         if (frag_overflow) {
973                 struct sk_buff *nskb = xenvif_alloc_skb(0);
974                 if (unlikely(nskb == NULL)) {
975                         if (net_ratelimit())
976                                 netdev_err(queue->vif->dev,
977                                            "Can't allocate the frag_list skb.\n");
978                         return NULL;
979                 }
980
981                 shinfo = skb_shinfo(nskb);
982                 frags = shinfo->frags;
983
984                 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
985                      shinfo->nr_frags++, txp++, gop++) {
986                         index = pending_index(queue->pending_cons++);
987                         pending_idx = queue->pending_ring[index];
988                         xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
989                         frag_set_pending_idx(&frags[shinfo->nr_frags],
990                                              pending_idx);
991                 }
992
993                 skb_shinfo(skb)->frag_list = nskb;
994         }
995
996         return gop;
997 }
998
999 static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
1000                                            u16 pending_idx,
1001                                            grant_handle_t handle)
1002 {
1003         if (unlikely(queue->grant_tx_handle[pending_idx] !=
1004                      NETBACK_INVALID_HANDLE)) {
1005                 netdev_err(queue->vif->dev,
1006                            "Trying to overwrite active handle! pending_idx: %x\n",
1007                            pending_idx);
1008                 BUG();
1009         }
1010         queue->grant_tx_handle[pending_idx] = handle;
1011 }
1012
1013 static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
1014                                              u16 pending_idx)
1015 {
1016         if (unlikely(queue->grant_tx_handle[pending_idx] ==
1017                      NETBACK_INVALID_HANDLE)) {
1018                 netdev_err(queue->vif->dev,
1019                            "Trying to unmap invalid handle! pending_idx: %x\n",
1020                            pending_idx);
1021                 BUG();
1022         }
1023         queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
1024 }
1025
1026 static int xenvif_tx_check_gop(struct xenvif_queue *queue,
1027                                struct sk_buff *skb,
1028                                struct gnttab_map_grant_ref **gopp_map,
1029                                struct gnttab_copy **gopp_copy)
1030 {
1031         struct gnttab_map_grant_ref *gop_map = *gopp_map;
1032         u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1033         /* This always points to the shinfo of the skb being checked, which
1034          * could be either the first or the one on the frag_list
1035          */
1036         struct skb_shared_info *shinfo = skb_shinfo(skb);
1037         /* If this is non-NULL, we are currently checking the frag_list skb, and
1038          * this points to the shinfo of the first one
1039          */
1040         struct skb_shared_info *first_shinfo = NULL;
1041         int nr_frags = shinfo->nr_frags;
1042         const bool sharedslot = nr_frags &&
1043                                 frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
1044         int i, err;
1045
1046         /* Check status of header. */
1047         err = (*gopp_copy)->status;
1048         if (unlikely(err)) {
1049                 if (net_ratelimit())
1050                         netdev_dbg(queue->vif->dev,
1051                                    "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
1052                                    (*gopp_copy)->status,
1053                                    pending_idx,
1054                                    (*gopp_copy)->source.u.ref);
1055                 /* The first frag might still have this slot mapped */
1056                 if (!sharedslot)
1057                         xenvif_idx_release(queue, pending_idx,
1058                                            XEN_NETIF_RSP_ERROR);
1059         }
1060         (*gopp_copy)++;
1061
1062 check_frags:
1063         for (i = 0; i < nr_frags; i++, gop_map++) {
1064                 int j, newerr;
1065
1066                 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
1067
1068                 /* Check error status: if okay then remember grant handle. */
1069                 newerr = gop_map->status;
1070
1071                 if (likely(!newerr)) {
1072                         xenvif_grant_handle_set(queue,
1073                                                 pending_idx,
1074                                                 gop_map->handle);
1075                         /* Had a previous error? Invalidate this fragment. */
1076                         if (unlikely(err)) {
1077                                 xenvif_idx_unmap(queue, pending_idx);
1078                                 /* If the mapping of the first frag was OK, but
1079                                  * the header's copy failed, and they are
1080                                  * sharing a slot, send an error
1081                                  */
1082                                 if (i == 0 && sharedslot)
1083                                         xenvif_idx_release(queue, pending_idx,
1084                                                            XEN_NETIF_RSP_ERROR);
1085                                 else
1086                                         xenvif_idx_release(queue, pending_idx,
1087                                                            XEN_NETIF_RSP_OKAY);
1088                         }
1089                         continue;
1090                 }
1091
1092                 /* Error on this fragment: respond to client with an error. */
1093                 if (net_ratelimit())
1094                         netdev_dbg(queue->vif->dev,
1095                                    "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
1096                                    i,
1097                                    gop_map->status,
1098                                    pending_idx,
1099                                    gop_map->ref);
1100
1101                 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1102
1103                 /* Not the first error? Preceding frags already invalidated. */
1104                 if (err)
1105                         continue;
1106
1107                 /* First error: if the header hasn't shared a slot with the
1108                  * first frag, release it as well.
1109                  */
1110                 if (!sharedslot)
1111                         xenvif_idx_release(queue,
1112                                            XENVIF_TX_CB(skb)->pending_idx,
1113                                            XEN_NETIF_RSP_OKAY);
1114
1115                 /* Invalidate preceding fragments of this skb. */
1116                 for (j = 0; j < i; j++) {
1117                         pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1118                         xenvif_idx_unmap(queue, pending_idx);
1119                         xenvif_idx_release(queue, pending_idx,
1120                                            XEN_NETIF_RSP_OKAY);
1121                 }
1122
1123                 /* And if we found the error while checking the frag_list, unmap
1124                  * the first skb's frags
1125                  */
1126                 if (first_shinfo) {
1127                         for (j = 0; j < first_shinfo->nr_frags; j++) {
1128                                 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
1129                                 xenvif_idx_unmap(queue, pending_idx);
1130                                 xenvif_idx_release(queue, pending_idx,
1131                                                    XEN_NETIF_RSP_OKAY);
1132                         }
1133                 }
1134
1135                 /* Remember the error: invalidate all subsequent fragments. */
1136                 err = newerr;
1137         }
1138
1139         if (skb_has_frag_list(skb) && !first_shinfo) {
1140                 first_shinfo = skb_shinfo(skb);
1141                 shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
1142                 nr_frags = shinfo->nr_frags;
1143
1144                 goto check_frags;
1145         }
1146
1147         *gopp_map = gop_map;
1148         return err;
1149 }
1150
1151 static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
1152 {
1153         struct skb_shared_info *shinfo = skb_shinfo(skb);
1154         int nr_frags = shinfo->nr_frags;
1155         int i;
1156         u16 prev_pending_idx = INVALID_PENDING_IDX;
1157
1158         for (i = 0; i < nr_frags; i++) {
1159                 skb_frag_t *frag = shinfo->frags + i;
1160                 struct xen_netif_tx_request *txp;
1161                 struct page *page;
1162                 u16 pending_idx;
1163
1164                 pending_idx = frag_get_pending_idx(frag);
1165
1166                 /* If this is not the first frag, chain it to the previous one. */
1167                 if (prev_pending_idx == INVALID_PENDING_IDX)
1168                         skb_shinfo(skb)->destructor_arg =
1169                                 &callback_param(queue, pending_idx);
1170                 else
1171                         callback_param(queue, prev_pending_idx).ctx =
1172                                 &callback_param(queue, pending_idx);
1173
1174                 callback_param(queue, pending_idx).ctx = NULL;
1175                 prev_pending_idx = pending_idx;
1176
1177                 txp = &queue->pending_tx_info[pending_idx].req;
1178                 page = virt_to_page(idx_to_kaddr(queue, pending_idx));
1179                 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1180                 skb->len += txp->size;
1181                 skb->data_len += txp->size;
1182                 skb->truesize += txp->size;
1183
1184                 /* Take an extra reference to offset the network stack's put_page() */
1185                 get_page(queue->mmap_pages[pending_idx]);
1186         }
1187         /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
1188          * overlaps with "index", and "mapping" is not set. I think mapping
1189          * should be set. If delivered to local stack, it would drop this
1190          * skb in sk_filter unless the socket has the right to use it.
1191          */
1192         skb->pfmemalloc = false;
1193 }
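
/* The chain built above, sketched: skb_shinfo(skb)->destructor_arg points at
 * the first frag's callback_struct, each .ctx then points at the next frag's
 * callback_struct, and the last .ctx is left NULL, so the zerocopy callback
 * can walk every pending slot of this skb starting from the head pointer.
 */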
1194
1195 static int xenvif_get_extras(struct xenvif_queue *queue,
1196                                 struct xen_netif_extra_info *extras,
1197                                 int work_to_do)
1198 {
1199         struct xen_netif_extra_info extra;
1200         RING_IDX cons = queue->tx.req_cons;
1201
1202         do {
1203                 if (unlikely(work_to_do-- <= 0)) {
1204                         netdev_err(queue->vif->dev, "Missing extra info\n");
1205                         xenvif_fatal_tx_err(queue->vif);
1206                         return -EBADR;
1207                 }
1208
1209                 memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
1210                        sizeof(extra));
1211                 if (unlikely(!extra.type ||
1212                              extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1213                         queue->tx.req_cons = ++cons;
1214                         netdev_err(queue->vif->dev,
1215                                    "Invalid extra type: %d\n", extra.type);
1216                         xenvif_fatal_tx_err(queue->vif);
1217                         return -EINVAL;
1218                 }
1219
1220                 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1221                 queue->tx.req_cons = ++cons;
1222         } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1223
1224         return work_to_do;
1225 }
1226
1227 static int xenvif_set_skb_gso(struct xenvif *vif,
1228                               struct sk_buff *skb,
1229                               struct xen_netif_extra_info *gso)
1230 {
1231         if (!gso->u.gso.size) {
1232                 netdev_err(vif->dev, "GSO size must not be zero.\n");
1233                 xenvif_fatal_tx_err(vif);
1234                 return -EINVAL;
1235         }
1236
1237         switch (gso->u.gso.type) {
1238         case XEN_NETIF_GSO_TYPE_TCPV4:
1239                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1240                 break;
1241         case XEN_NETIF_GSO_TYPE_TCPV6:
1242                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1243                 break;
1244         default:
1245                 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1246                 xenvif_fatal_tx_err(vif);
1247                 return -EINVAL;
1248         }
1249
1250         skb_shinfo(skb)->gso_size = gso->u.gso.size;
1251         /* gso_segs will be calculated later */
1252
1253         return 0;
1254 }
1255
1256 static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
1257 {
1258         bool recalculate_partial_csum = false;
1259
1260         /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1261          * peers can fail to set NETRXF_csum_blank when sending a GSO
1262          * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1263          * recalculate the partial checksum.
1264          */
1265         if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1266                 queue->stats.rx_gso_checksum_fixup++;
1267                 skb->ip_summed = CHECKSUM_PARTIAL;
1268                 recalculate_partial_csum = true;
1269         }
1270
1271         /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1272         if (skb->ip_summed != CHECKSUM_PARTIAL)
1273                 return 0;
1274
1275         return skb_checksum_setup(skb, recalculate_partial_csum);
1276 }
1277
1278 static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
1279 {
1280         u64 now = get_jiffies_64();
1281         u64 next_credit = queue->credit_window_start +
1282                 msecs_to_jiffies(queue->credit_usec / 1000);
1283
1284         /* Timer could already be pending in rare cases. */
1285         if (timer_pending(&queue->credit_timeout))
1286                 return true;
1287
1288         /* Passed the point where we can replenish credit? */
1289         if (time_after_eq64(now, next_credit)) {
1290                 queue->credit_window_start = now;
1291                 tx_add_credit(queue);
1292         }
1293
1294         /* Still too big to send right now? Set a callback. */
1295         if (size > queue->remaining_credit) {
1296                 queue->credit_timeout.data     =
1297                         (unsigned long)queue;
1298                 queue->credit_timeout.function =
1299                         tx_credit_callback;
1300                 mod_timer(&queue->credit_timeout,
1301                           next_credit);
1302                 queue->credit_window_start = next_credit;
1303
1304                 return true;
1305         }
1306
1307         return false;
1308 }
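
/* Timeline sketch: with credit_usec == 500000 and credit_bytes == 65536,
 * credit is replenished at most once per 500 ms window. An oversized send
 * between replenish points arms credit_timeout for next_credit, and the
 * packet waits until tx_credit_callback() tops the credit back up and
 * reschedules NAPI.
 */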
1309
1310 static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1311                                      int budget,
1312                                      unsigned *copy_ops,
1313                                      unsigned *map_ops)
1314 {
1315         struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
1316         struct sk_buff *skb;
1317         int ret;
1318
1319         while (skb_queue_len(&queue->tx_queue) < budget) {
1320                 struct xen_netif_tx_request txreq;
1321                 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1322                 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1323                 u16 pending_idx;
1324                 RING_IDX idx;
1325                 int work_to_do;
1326                 unsigned int data_len;
1327                 pending_ring_idx_t index;
1328
1329                 if (queue->tx.sring->req_prod - queue->tx.req_cons >
1330                     XEN_NETIF_TX_RING_SIZE) {
1331                         netdev_err(queue->vif->dev,
1332                                    "Impossible number of requests. req_prod %d, req_cons %d, size %ld\n",
1334                                    queue->tx.sring->req_prod, queue->tx.req_cons,
1335                                    XEN_NETIF_TX_RING_SIZE);
1336                         xenvif_fatal_tx_err(queue->vif);
1337                         break;
1338                 }
1339
1340                 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
1341                 if (!work_to_do)
1342                         break;
1343
1344                 idx = queue->tx.req_cons;
1345                 rmb(); /* Ensure that we see the request before we copy it. */
1346                 memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
1347
1348                 /* Credit-based scheduling. */
1349                 if (txreq.size > queue->remaining_credit &&
1350                     tx_credit_exceeded(queue, txreq.size))
1351                         break;
1352
1353                 queue->remaining_credit -= txreq.size;
1354
1355                 work_to_do--;
1356                 queue->tx.req_cons = ++idx;
1357
1358                 memset(extras, 0, sizeof(extras));
1359                 if (txreq.flags & XEN_NETTXF_extra_info) {
1360                         work_to_do = xenvif_get_extras(queue, extras,
1361                                                        work_to_do);
1362                         idx = queue->tx.req_cons;
1363                         if (unlikely(work_to_do < 0))
1364                                 break;
1365                 }
1366
1367                 ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
1368                 if (unlikely(ret < 0))
1369                         break;
1370
1371                 idx += ret;
1372
1373                 if (unlikely(txreq.size < ETH_HLEN)) {
1374                         netdev_dbg(queue->vif->dev,
1375                                    "Bad packet size: %d\n", txreq.size);
1376                         xenvif_tx_err(queue, &txreq, idx);
1377                         break;
1378                 }
1379
1380                 /* The payload mustn't fragment, so it must not cross a page. */
1381                 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1382                         netdev_err(queue->vif->dev,
1383                                    "txreq.offset: %x, size: %u, end: %lu\n",
1384                                    txreq.offset, txreq.size,
1385                                    (txreq.offset&~PAGE_MASK) + txreq.size);
1386                         xenvif_fatal_tx_err(queue->vif);
1387                         break;
1388                 }
1389
1390                 index = pending_index(queue->pending_cons);
1391                 pending_idx = queue->pending_ring[index];
1392
1393                 data_len = (txreq.size > PKT_PROT_LEN &&
1394                             ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
1395                         PKT_PROT_LEN : txreq.size;
1396
1397                 skb = xenvif_alloc_skb(data_len);
1398                 if (unlikely(skb == NULL)) {
1399                         netdev_dbg(queue->vif->dev,
1400                                    "Can't allocate a skb in xenvif_tx_build_gops.\n");
1401                         xenvif_tx_err(queue, &txreq, idx);
1402                         break;
1403                 }
1404
1405                 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1406                         struct xen_netif_extra_info *gso;
1407                         gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1408
1409                         if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1410                                 /* Failure in xenvif_set_skb_gso is fatal. */
1411                                 kfree_skb(skb);
1412                                 break;
1413                         }
1414                 }
1415
1416                 XENVIF_TX_CB(skb)->pending_idx = pending_idx;
1417
1418                 __skb_put(skb, data_len);
1419                 queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
1420                 queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
1421                 queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
1422
1423                 queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
1424                         virt_to_mfn(skb->data);
1425                 queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
1426                 queue->tx_copy_ops[*copy_ops].dest.offset =
1427                         offset_in_page(skb->data);
1428
1429                 queue->tx_copy_ops[*copy_ops].len = data_len;
1430                 queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
1431
1432                 (*copy_ops)++;
1433
1434                 skb_shinfo(skb)->nr_frags = ret;
1435                 if (data_len < txreq.size) {
1436                         skb_shinfo(skb)->nr_frags++;
1437                         frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1438                                              pending_idx);
1439                         xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
1440                         gop++;
1441                 } else {
1442                         frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1443                                              INVALID_PENDING_IDX);
1444                         memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
1445                                sizeof(txreq));
1446                 }
1447
1448                 queue->pending_cons++;
1449
1450                 request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
1451                 if (request_gop == NULL) {
1452                         kfree_skb(skb);
1453                         xenvif_tx_err(queue, &txreq, idx);
1454                         break;
1455                 }
1456                 gop = request_gop;
1457
1458                 __skb_queue_tail(&queue->tx_queue, skb);
1459
1460                 queue->tx.req_cons = idx;
1461
1462                 if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
1463                     (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1464                         break;
1465         }
1466
1467         (*map_ops) = gop - queue->tx_map_ops;
1468         return;
1469 }
1470
1471 /* Consolidate skb with a frag_list into a brand new one with local pages on
1472  * frags. Returns 0 on success, or -ENOMEM if new pages cannot be allocated.
1473  */
1474 static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1475 {
1476         unsigned int offset = skb_headlen(skb);
1477         skb_frag_t frags[MAX_SKB_FRAGS];
1478         int i;
1479         struct ubuf_info *uarg;
1480         struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1481
1482         queue->stats.tx_zerocopy_sent += 2;
1483         queue->stats.tx_frag_overflow++;
1484
1485         xenvif_fill_frags(queue, nskb);
1486         /* Subtract the frags' size; we will correct it later */
1487         skb->truesize -= skb->data_len;
1488         skb->len += nskb->len;
1489         skb->data_len += nskb->len;
1490
1491         /* create a brand new frags array and coalesce there */
1492         for (i = 0; offset < skb->len; i++) {
1493                 struct page *page;
1494                 unsigned int len;
1495
1496                 BUG_ON(i >= MAX_SKB_FRAGS);
1497                 page = alloc_page(GFP_ATOMIC|__GFP_COLD);
1498                 if (!page) {
1499                         int j;
1500                         skb->truesize += skb->data_len;
1501                         for (j = 0; j < i; j++)
1502                                 put_page(frags[j].page.p);
1503                         return -ENOMEM;
1504                 }
1505
1506                 if (offset + PAGE_SIZE < skb->len)
1507                         len = PAGE_SIZE;
1508                 else
1509                         len = skb->len - offset;
1510                 if (skb_copy_bits(skb, offset, page_address(page), len))
1511                         BUG();
1512
1513                 offset += len;
1514                 frags[i].page.p = page;
1515                 frags[i].page_offset = 0;
1516                 skb_frag_size_set(&frags[i], len);
1517         }
1518         /* swap the new frags array in for the old one */
1519         memcpy(skb_shinfo(skb)->frags,
1520                frags,
1521                i * sizeof(skb_frag_t));
1522         skb_shinfo(skb)->nr_frags = i;
1523         skb->truesize += i * PAGE_SIZE;
1524
1525         /* remove traces of mapped pages and frag_list */
1526         skb_frag_list_init(skb);
1527         uarg = skb_shinfo(skb)->destructor_arg;
1528         /* increase inflight counter to offset decrement in callback */
1529         atomic_inc(&queue->inflight_packets);
1530         uarg->callback(uarg, true);
1531         skb_shinfo(skb)->destructor_arg = NULL;
1532
1533         xenvif_skb_zerocopy_prepare(queue, nskb);
1534         kfree_skb(nskb);
1535
1536         return 0;
1537 }
1538
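     /* Second half of the TX path: check the results of the grant operations
      * issued by xenvif_tx_build_gops(), fill in frags, fix up checksum and
      * GSO metadata, and pass each completed skb into the network stack.
      */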
1539 static int xenvif_tx_submit(struct xenvif_queue *queue)
1540 {
1541         struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1542         struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1543         struct sk_buff *skb;
1544         int work_done = 0;
1545
1546         while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1547                 struct xen_netif_tx_request *txp;
1548                 u16 pending_idx;
1549                 unsigned data_len;
1550
1551                 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1552                 txp = &queue->pending_tx_info[pending_idx].req;
1553
1554                 /* Check the remap error code. */
1555                 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1556                         /* If there was an error, xenvif_tx_check_gop is
1557                          * expected to release all the frags which were mapped,
1558                          * so kfree_skb shouldn't do it again
1559                          */
1560                         skb_shinfo(skb)->nr_frags = 0;
1561                         if (skb_has_frag_list(skb)) {
1562                                 struct sk_buff *nskb =
1563                                                 skb_shinfo(skb)->frag_list;
1564                                 skb_shinfo(nskb)->nr_frags = 0;
1565                         }
1566                         kfree_skb(skb);
1567                         continue;
1568                 }
1569
1570                 data_len = skb->len;
1571                 callback_param(queue, pending_idx).ctx = NULL;
1572                 if (data_len < txp->size) {
1573                         /* Append the packet payload as a fragment. */
1574                         txp->offset += data_len;
1575                         txp->size -= data_len;
1576                 } else {
1577                         /* Schedule a response immediately. */
1578                         xenvif_idx_release(queue, pending_idx,
1579                                            XEN_NETIF_RSP_OKAY);
1580                 }
1581
1582                 if (txp->flags & XEN_NETTXF_csum_blank)
1583                         skb->ip_summed = CHECKSUM_PARTIAL;
1584                 else if (txp->flags & XEN_NETTXF_data_validated)
1585                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1586
1587                 xenvif_fill_frags(queue, skb);
1588
1589                 if (unlikely(skb_has_frag_list(skb))) {
1590                         if (xenvif_handle_frag_list(queue, skb)) {
1591                                 if (net_ratelimit())
1592                                         netdev_err(queue->vif->dev,
1593                                                    "Not enough memory to consolidate frag_list!\n");
1594                                 xenvif_skb_zerocopy_prepare(queue, skb);
1595                                 kfree_skb(skb);
1596                                 continue;
1597                         }
1598                 }
1599
1600                 if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
1601                         int target = min_t(int, skb->len, PKT_PROT_LEN);
1602                         __pskb_pull_tail(skb, target - skb_headlen(skb));
1603                 }
1604
1605                 skb->dev      = queue->vif->dev;
1606                 skb->protocol = eth_type_trans(skb, skb->dev);
1607                 skb_reset_network_header(skb);
1608
1609                 if (checksum_setup(queue, skb)) {
1610                         netdev_dbg(queue->vif->dev,
1611                                    "Can't set up checksum in xenvif_tx_submit\n");
1612                         /* We have to set this flag to trigger the callback */
1613                         if (skb_shinfo(skb)->destructor_arg)
1614                                 xenvif_skb_zerocopy_prepare(queue, skb);
1615                         kfree_skb(skb);
1616                         continue;
1617                 }
1618
1619                 skb_probe_transport_header(skb, 0);
1620
1621                 /* If the packet is GSO then we will have just set up the
1622                  * transport header offset in checksum_setup so it's now
1623                  * straightforward to calculate gso_segs.
1624                  */
1625                 if (skb_is_gso(skb)) {
1626                         int mss = skb_shinfo(skb)->gso_size;
1627                         int hdrlen = skb_transport_header(skb) -
1628                                 skb_mac_header(skb) +
1629                                 tcp_hdrlen(skb);
1630
1631                         skb_shinfo(skb)->gso_segs =
1632                                 DIV_ROUND_UP(skb->len - hdrlen, mss);
1633                 }
1634
1635                 queue->stats.rx_bytes += skb->len;
1636                 queue->stats.rx_packets++;
1637
1638                 work_done++;
1639
1640                 /* Set this flag right before netif_receive_skb, otherwise
1641                  * someone might think this packet already left netback, and
1642                  * do a skb_copy_ubufs while we are still in control of the
1643                  * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
1644                  */
1645                 if (skb_shinfo(skb)->destructor_arg) {
1646                         xenvif_skb_zerocopy_prepare(queue, skb);
1647                         queue->stats.tx_zerocopy_sent++;
1648                 }
1649
1650                 netif_receive_skb(skb);
1651         }
1652
1653         return work_done;
1654 }
1655
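     /* Completion callback for zerocopy skbs: walk the chain of ubuf_info
      * contexts, queue each pending index on the dealloc ring, and wake the
      * dealloc thread to unmap the corresponding grants.
      */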
1656 void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1657 {
1658         unsigned long flags;
1659         pending_ring_idx_t index;
1660         struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1661
1662         /* This is the only place where we grab this lock, to protect callbacks
1663          * from each other.
1664          */
1665         spin_lock_irqsave(&queue->callback_lock, flags);
1666         do {
1667                 u16 pending_idx = ubuf->desc;
1668                 ubuf = (struct ubuf_info *) ubuf->ctx;
1669                 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1670                         MAX_PENDING_REQS);
1671                 index = pending_index(queue->dealloc_prod);
1672                 queue->dealloc_ring[index] = pending_idx;
1673                 /* Sync with xenvif_tx_dealloc_action:
1674                  * insert idx then incr producer.
1675                  */
1676                 smp_wmb();
1677                 queue->dealloc_prod++;
1678         } while (ubuf);
1679         wake_up(&queue->dealloc_wq);
1680         spin_unlock_irqrestore(&queue->callback_lock, flags);
1681
1682         if (likely(zerocopy_success))
1683                 queue->stats.tx_zerocopy_success++;
1684         else
1685                 queue->stats.tx_zerocopy_fail++;
1686         xenvif_skb_zerocopy_complete(queue);
1687 }
1688
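     /* Unmap the grants behind every pending index queued on the dealloc ring
      * and release those indices back to the pending ring with an OKAY status.
      */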
1689 static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1690 {
1691         struct gnttab_unmap_grant_ref *gop;
1692         pending_ring_idx_t dc, dp;
1693         u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1694         unsigned int i = 0;
1695
1696         dc = queue->dealloc_cons;
1697         gop = queue->tx_unmap_ops;
1698
1699         /* Free up any grants we have finished using */
1700         do {
1701                 dp = queue->dealloc_prod;
1702
1703                 /* Ensure we see all indices enqueued by all
1704                  * xenvif_zerocopy_callback() invocations.
1705                  */
1706                 smp_rmb();
1707
1708                 while (dc != dp) {
1709                         BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
1710                         pending_idx =
1711                                 queue->dealloc_ring[pending_index(dc++)];
1712
1713                         pending_idx_release[gop-queue->tx_unmap_ops] =
1714                                 pending_idx;
1715                         queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
1716                                 queue->mmap_pages[pending_idx];
1717                         gnttab_set_unmap_op(gop,
1718                                             idx_to_kaddr(queue, pending_idx),
1719                                             GNTMAP_host_map,
1720                                             queue->grant_tx_handle[pending_idx]);
1721                         xenvif_grant_handle_reset(queue, pending_idx);
1722                         ++gop;
1723                 }
1724
1725         } while (dp != queue->dealloc_prod);
1726
1727         queue->dealloc_cons = dc;
1728
1729         if (gop - queue->tx_unmap_ops > 0) {
1730                 int ret;
1731                 ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1732                                         NULL,
1733                                         queue->pages_to_unmap,
1734                                         gop - queue->tx_unmap_ops);
1735                 if (ret) {
1736                         netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
1737                                    gop - queue->tx_unmap_ops, ret);
1738                         for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1739                                 if (gop[i].status != GNTST_okay)
1740                                         netdev_err(queue->vif->dev,
1741                                                    " host_addr: %llx handle: %x status: %d\n",
1742                                                    gop[i].host_addr,
1743                                                    gop[i].handle,
1744                                                    gop[i].status);
1745                         }
1746                         BUG();
1747                 }
1748         }
1749
1750         for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1751                 xenvif_idx_release(queue, pending_idx_release[i],
1752                                    XEN_NETIF_RSP_OKAY);
1753 }
1754
1755
1756 /* Called after netfront has transmitted */
1757 int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1758 {
1759         unsigned nr_mops, nr_cops = 0;
1760         int work_done, ret;
1761
1762         if (unlikely(!tx_work_todo(queue)))
1763                 return 0;
1764
1765         xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1766
1767         if (nr_cops == 0)
1768                 return 0;
1769
1770         gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1771         if (nr_mops != 0) {
1772                 ret = gnttab_map_refs(queue->tx_map_ops,
1773                                       NULL,
1774                                       queue->pages_to_map,
1775                                       nr_mops);
1776                 BUG_ON(ret);
1777         }
1778
1779         work_done = xenvif_tx_submit(queue);
1780
1781         return work_done;
1782 }
1783
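     /* Send a TX response for @pending_idx to the frontend and return the
      * index to the pending ring; serialized against other responders by
      * response_lock.
      */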
1784 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1785                                u8 status)
1786 {
1787         struct pending_tx_info *pending_tx_info;
1788         pending_ring_idx_t index;
1789         unsigned long flags;
1790
1791         pending_tx_info = &queue->pending_tx_info[pending_idx];
1792         spin_lock_irqsave(&queue->response_lock, flags);
1793         make_tx_response(queue, &pending_tx_info->req, status);
1794         index = pending_index(queue->pending_prod);
1795         queue->pending_ring[index] = pending_idx;
1796         /* TX shouldn't use the index before we give it back here */
1797         mb();
1798         queue->pending_prod++;
1799         spin_unlock_irqrestore(&queue->response_lock, flags);
1800 }
1801
1802
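     /* Write a TX response (plus a null extra-info slot when the request
      * carried one) into the shared ring and notify the frontend if needed.
      */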
1803 static void make_tx_response(struct xenvif_queue *queue,
1804                              struct xen_netif_tx_request *txp,
1805                              s8       st)
1806 {
1807         RING_IDX i = queue->tx.rsp_prod_pvt;
1808         struct xen_netif_tx_response *resp;
1809         int notify;
1810
1811         resp = RING_GET_RESPONSE(&queue->tx, i);
1812         resp->id     = txp->id;
1813         resp->status = st;
1814
1815         if (txp->flags & XEN_NETTXF_extra_info)
1816                 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1817
1818         queue->tx.rsp_prod_pvt = ++i;
1819         RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1820         if (notify)
1821                 notify_remote_via_irq(queue->tx_irq);
1822 }
1823
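     /* Write a single RX response into the shared ring; a negative status
      * overrides the size in the response's status field.
      */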
1824 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
1825                                              u16      id,
1826                                              s8       st,
1827                                              u16      offset,
1828                                              u16      size,
1829                                              u16      flags)
1830 {
1831         RING_IDX i = queue->rx.rsp_prod_pvt;
1832         struct xen_netif_rx_response *resp;
1833
1834         resp = RING_GET_RESPONSE(&queue->rx, i);
1835         resp->offset     = offset;
1836         resp->flags      = flags;
1837         resp->id         = id;
1838         resp->status     = (s16)size;
1839         if (st < 0)
1840                 resp->status = (s16)st;
1841
1842         queue->rx.rsp_prod_pvt = ++i;
1843
1844         return resp;
1845 }
1846
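     /* Unmap the single grant behind @pending_idx; failure here means the
      * grant table state is inconsistent, which is fatal.
      */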
1847 void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1848 {
1849         int ret;
1850         struct gnttab_unmap_grant_ref tx_unmap_op;
1851
1852         gnttab_set_unmap_op(&tx_unmap_op,
1853                             idx_to_kaddr(queue, pending_idx),
1854                             GNTMAP_host_map,
1855                             queue->grant_tx_handle[pending_idx]);
1856         xenvif_grant_handle_reset(queue, pending_idx);
1857
1858         ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1859                                 &queue->mmap_pages[pending_idx], 1);
1860         if (ret) {
1861                 netdev_err(queue->vif->dev,
1862                            "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
1863                            ret,
1864                            pending_idx,
1865                            tx_unmap_op.host_addr,
1866                            tx_unmap_op.handle,
1867                            tx_unmap_op.status);
1868                 BUG();
1869         }
1870 }
1871
1872 static inline int rx_work_todo(struct xenvif_queue *queue)
1873 {
1874         return (!skb_queue_empty(&queue->rx_queue) &&
1875                xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots));
1876 }
1877
1878 static inline int tx_work_todo(struct xenvif_queue *queue)
1879 {
1880         if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1881                 return 1;
1882
1883         return 0;
1884 }
1885
1886 static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1887 {
1888         return queue->dealloc_cons != queue->dealloc_prod;
1889 }
1890
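     /* Tear down the shared TX/RX ring mappings, if they were established. */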
1891 void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
1892 {
1893         if (queue->tx.sring)
1894                 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1895                                         queue->tx.sring);
1896         if (queue->rx.sring)
1897                 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1898                                         queue->rx.sring);
1899 }
1900
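     /* Map the frontend's TX and RX shared rings, granted via @tx_ring_ref
      * and @rx_ring_ref, into the backend and initialise the back rings.
      */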
1901 int xenvif_map_frontend_rings(struct xenvif_queue *queue,
1902                               grant_ref_t tx_ring_ref,
1903                               grant_ref_t rx_ring_ref)
1904 {
1905         void *addr;
1906         struct xen_netif_tx_sring *txs;
1907         struct xen_netif_rx_sring *rxs;
1908
1909         int err;
1910
1911         err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1912                                      tx_ring_ref, &addr);
1913         if (err)
1914                 goto err;
1915
1916         txs = (struct xen_netif_tx_sring *)addr;
1917         BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);
1918
1919         err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1920                                      rx_ring_ref, &addr);
1921         if (err)
1922                 goto err;
1923
1924         rxs = (struct xen_netif_rx_sring *)addr;
1925         BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
1926
1927         return 0;
1928
1929 err:
1930         xenvif_unmap_frontend_rings(queue);
1931         return err;
1932 }
1933
1934 static void xenvif_start_queue(struct xenvif_queue *queue)
1935 {
1936         if (xenvif_schedulable(queue->vif))
1937                 xenvif_wake_queue(queue);
1938 }
1939
1940 /* Only called from the queue's thread, it handles the situation when the guest
1941  * doesn't post enough requests on the receiving ring.
1942  * First xenvif_start_xmit disables QDisc and starts a timer, and then either
1943  * the timer fires, or the guest sends an interrupt after posting new requests.
1944  * If it is the timer, the carrier is turned off here.
1945  */
1946 static void xenvif_rx_purge_event(struct xenvif_queue *queue)
1947 {
1948         /* Either the last unsuccessful skb or at least 1 slot should fit */
1949         int needed = queue->rx_last_skb_slots ?
1950                      queue->rx_last_skb_slots : 1;
1951
1952         /* It is assumed that if the guest posts new slots after this, the RX
1953          * interrupt will set the QUEUE_STATUS_RX_PURGE_EVENT bit and wake up
1954          * the thread again
1955          */
1956         set_bit(QUEUE_STATUS_RX_STALLED, &queue->status);
1957         if (!xenvif_rx_ring_slots_available(queue, needed)) {
1958                 rtnl_lock();
1959                 if (netif_carrier_ok(queue->vif->dev)) {
1960                         /* Timer fired and there are still no slots. Turn off
1961                          * everything except the interrupts
1962                          */
1963                         netif_carrier_off(queue->vif->dev);
1964                         skb_queue_purge(&queue->rx_queue);
1965                         queue->rx_last_skb_slots = 0;
1966                         if (net_ratelimit())
1967                                 netdev_err(queue->vif->dev, "Carrier off due to lack of guest response on queue %d\n", queue->id);
1968                 } else {
1969                         /* Probably another queue already turned the carrier
1970                          * off; make sure nothing is stuck in this queue's
1971                          * internal queue
1972                          */
1973                         skb_queue_purge(&queue->rx_queue);
1974                         queue->rx_last_skb_slots = 0;
1975                 }
1976                 rtnl_unlock();
1977         } else if (!netif_carrier_ok(queue->vif->dev)) {
1978                 unsigned int num_queues = queue->vif->num_queues;
1979                 unsigned int i;
1980                 /* The carrier was down, but an interrupt kicked
1981                  * the thread again after new requests were
1982                  * posted
1983                  */
1984                 clear_bit(QUEUE_STATUS_RX_STALLED,
1985                           &queue->status);
1986                 rtnl_lock();
1987                 netif_carrier_on(queue->vif->dev);
1988                 netif_tx_wake_all_queues(queue->vif->dev);
1989                 rtnl_unlock();
1990
1991                 for (i = 0; i < num_queues; i++) {
1992                         struct xenvif_queue *temp = &queue->vif->queues[i];
1993
1994                         xenvif_napi_schedule_or_enable_events(temp);
1995                 }
1996                 if (net_ratelimit())
1997                         netdev_err(queue->vif->dev, "Carrier on again\n");
1998         } else {
1999                 /* Queuing was stopped, but the guest posted
2000                  * new requests and sent an interrupt
2001                  */
2002                 clear_bit(QUEUE_STATUS_RX_STALLED,
2003                           &queue->status);
2004                 del_timer_sync(&queue->rx_stalled);
2005                 xenvif_start_queue(queue);
2006         }
2007 }
2008
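     /* Per-queue guest RX thread: wait until there is work on the RX path,
      * deal with rogue or stalled frontends, and push queued skbs to the
      * frontend via xenvif_rx_action().
      */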
2009 int xenvif_kthread_guest_rx(void *data)
2010 {
2011         struct xenvif_queue *queue = data;
2012         struct sk_buff *skb;
2013
2014         while (!kthread_should_stop()) {
2015                 wait_event_interruptible(queue->wq,
2016                                          rx_work_todo(queue) ||
2017                                          queue->vif->disabled ||
2018                                          test_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status) ||
2019                                          kthread_should_stop());
2020
2021                 if (kthread_should_stop())
2022                         break;
2023
2024                 /* If this frontend is found to be rogue, disable it in
2025                  * kthread context. Currently this is only set when
2026                  * netback finds out the frontend sends malformed packets,
2027                  * but we cannot disable the interface in softirq
2028                  * context, so we defer it here, if this thread is
2029                  * associated with queue 0.
2030                  */
2031                 if (unlikely(queue->vif->disabled && queue->id == 0)) {
2032                         xenvif_carrier_off(queue->vif);
2033                 } else if (unlikely(queue->vif->disabled)) {
2034                         /* kthread_stop() will be called on this thread soon;
2035                          * be a bit proactive
2036                          */
2037                         skb_queue_purge(&queue->rx_queue);
2038                         queue->rx_last_skb_slots = 0;
2039                 } else if (unlikely(test_and_clear_bit(QUEUE_STATUS_RX_PURGE_EVENT,
2040                                                      &queue->status))) {
2041                         xenvif_rx_purge_event(queue);
2042                 } else if (!netif_carrier_ok(queue->vif->dev)) {
2043                         /* Another queue stalled and turned the carrier off, so
2044                          * purge the internal queue even on queues which were
2045                          * not themselves blocked
2046                          */
2047                         skb_queue_purge(&queue->rx_queue);
2048                         queue->rx_last_skb_slots = 0;
2049                 }
2050
2051                 if (!skb_queue_empty(&queue->rx_queue))
2052                         xenvif_rx_action(queue);
2053
2054                 cond_resched();
2055         }
2056
2057         /* Bin any remaining skbs */
2058         while ((skb = skb_dequeue(&queue->rx_queue)) != NULL)
2059                 dev_kfree_skb(skb);
2060
2061         return 0;
2062 }
2063
2064 static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
2065 {
2066         /* Dealloc thread must remain running until all inflight
2067          * packets complete.
2068          */
2069         return kthread_should_stop() &&
2070                 !atomic_read(&queue->inflight_packets);
2071 }
2072
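     /* Per-queue dealloc thread: unmap grants of completed zerocopy skbs
      * until the thread is asked to stop and no packets remain in flight.
      */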
2073 int xenvif_dealloc_kthread(void *data)
2074 {
2075         struct xenvif_queue *queue = data;
2076
2077         for (;;) {
2078                 wait_event_interruptible(queue->dealloc_wq,
2079                                          tx_dealloc_work_todo(queue) ||
2080                                          xenvif_dealloc_kthread_should_stop(queue));
2081                 if (xenvif_dealloc_kthread_should_stop(queue))
2082                         break;
2083
2084                 xenvif_tx_dealloc_action(queue);
2085                 cond_resched();
2086         }
2087
2088         /* Unmap anything remaining */
2089         if (tx_dealloc_work_todo(queue))
2090                 xenvif_tx_dealloc_action(queue);
2091
2092         return 0;
2093 }
2094
2095 static int __init netback_init(void)
2096 {
2097         int rc = 0;
2098
2099         if (!xen_domain())
2100                 return -ENODEV;
2101
2102         /* Allow as many queues as there are CPUs, by default */
2103         xenvif_max_queues = num_online_cpus();
2104
2105         if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
2106                 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
2107                         fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
2108                 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
2109         }
2110
2111         rc = xenvif_xenbus_init();
2112         if (rc)
2113                 goto failed_init;
2114
2115         rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
2116
2117 #ifdef CONFIG_DEBUG_FS
2118         xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
2119         if (IS_ERR_OR_NULL(xen_netback_dbg_root))
2120                 pr_warn("Init of debugfs returned %ld!\n",
2121                         PTR_ERR(xen_netback_dbg_root));
2122 #endif /* CONFIG_DEBUG_FS */
2123
2124         return 0;
2125
2126 failed_init:
2127         return rc;
2128 }
2129
2130 module_init(netback_init);
2131
2132 static void __exit netback_fini(void)
2133 {
2134 #ifdef CONFIG_DEBUG_FS
2135         if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
2136                 debugfs_remove_recursive(xen_netback_dbg_root);
2137 #endif /* CONFIG_DEBUG_FS */
2138         xenvif_xenbus_fini();
2139 }
2140 module_exit(netback_fini);
2141
2142 MODULE_LICENSE("Dual BSD/GPL");
2143 MODULE_ALIAS("xen-backend:vif");