/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * RMNET Data MAP protocol
 */
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
24 #define RMNET_MAP_DEAGGR_SPACING 64
25 #define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
27 static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
28 const void *txporthdr)
30 __sum16 *check = NULL;
34 check = &(((struct tcphdr *)txporthdr)->check);
38 check = &(((struct udphdr *)txporthdr)->check);
50 rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
51 struct rmnet_map_dl_csum_trailer *csum_trailer,
52 struct rmnet_priv *priv)
54 __sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
55 u16 csum_value, csum_value_final;
60 ip4h = (struct iphdr *)(skb->data);
61 if ((ntohs(ip4h->frag_off) & IP_MF) ||
62 ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
63 priv->stats.csum_fragmented_pkt++;
67 txporthdr = skb->data + ip4h->ihl * 4;
69 csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
72 priv->stats.csum_err_invalid_transport++;
73 return -EPROTONOSUPPORT;
76 /* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
77 if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
78 priv->stats.csum_skipped++;
82 csum_value = ~ntohs(csum_trailer->csum_value);
83 hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
84 ip_payload_csum = csum16_sub((__force __sum16)csum_value,
85 (__force __be16)hdr_csum);
87 pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
88 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
90 addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
91 pseudo_csum = csum16_add(ip_payload_csum, addend);
93 addend = (__force __be16)ntohs((__force __be16)*csum_field);
94 csum_temp = ~csum16_sub(pseudo_csum, addend);
95 csum_value_final = (__force u16)csum_temp;
97 if (unlikely(csum_value_final == 0)) {
98 switch (ip4h->protocol) {
100 /* RFC 768 - DL4 1's complement rule for UDP csum 0 */
101 csum_value_final = ~csum_value_final;
105 /* DL4 Non-RFC compliant TCP checksum found */
106 if (*csum_field == (__force __sum16)0xFFFF)
107 csum_value_final = ~csum_value_final;
112 if (csum_value_final == ntohs((__force __be16)*csum_field)) {
113 priv->stats.csum_ok++;
116 priv->stats.csum_validation_failed++;
121 #if IS_ENABLED(CONFIG_IPV6)
123 rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
124 struct rmnet_map_dl_csum_trailer *csum_trailer,
125 struct rmnet_priv *priv)
127 __sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
128 u16 csum_value, csum_value_final;
129 __be16 ip6_hdr_csum, addend;
130 struct ipv6hdr *ip6h;
134 ip6h = (struct ipv6hdr *)(skb->data);
136 txporthdr = skb->data + sizeof(struct ipv6hdr);
137 csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
140 priv->stats.csum_err_invalid_transport++;
141 return -EPROTONOSUPPORT;
144 csum_value = ~ntohs(csum_trailer->csum_value);
145 ip6_hdr_csum = (__force __be16)
146 ~ntohs((__force __be16)ip_compute_csum(ip6h,
147 (int)(txporthdr - (void *)(skb->data))));
148 ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
151 length = (ip6h->nexthdr == IPPROTO_UDP) ?
152 ntohs(((struct udphdr *)txporthdr)->len) :
153 ntohs(ip6h->payload_len);
154 pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
155 length, ip6h->nexthdr, 0));
156 addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
157 pseudo_csum = csum16_add(ip6_payload_csum, addend);
159 addend = (__force __be16)ntohs((__force __be16)*csum_field);
160 csum_temp = ~csum16_sub(pseudo_csum, addend);
161 csum_value_final = (__force u16)csum_temp;
163 if (unlikely(csum_value_final == 0)) {
164 switch (ip6h->nexthdr) {
166 /* RFC 2460 section 8.1
167 * DL6 One's complement rule for UDP checksum 0
169 csum_value_final = ~csum_value_final;
173 /* DL6 Non-RFC compliant TCP checksum found */
174 if (*csum_field == (__force __sum16)0xFFFF)
175 csum_value_final = ~csum_value_final;
180 if (csum_value_final == ntohs((__force __be16)*csum_field)) {
181 priv->stats.csum_ok++;
184 priv->stats.csum_validation_failed++;
190 static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
192 struct iphdr *ip4h = (struct iphdr *)iphdr;
196 txphdr = iphdr + ip4h->ihl * 4;
198 if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
199 csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
205 rmnet_map_ipv4_ul_csum_header(void *iphdr,
206 struct rmnet_map_ul_csum_header *ul_header,
209 struct iphdr *ip4h = (struct iphdr *)iphdr;
210 __be16 *hdr = (__be16 *)ul_header, offset;
212 offset = htons((__force u16)(skb_transport_header(skb) -
213 (unsigned char *)iphdr));
214 ul_header->csum_start_offset = offset;
215 ul_header->csum_insert_offset = skb->csum_offset;
216 ul_header->csum_enabled = 1;
217 if (ip4h->protocol == IPPROTO_UDP)
218 ul_header->udp_ip4_ind = 1;
220 ul_header->udp_ip4_ind = 0;
222 /* Changing remaining fields to network order */
224 *hdr = htons((__force u16)*hdr);
226 skb->ip_summed = CHECKSUM_NONE;
228 rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
231 #if IS_ENABLED(CONFIG_IPV6)
232 static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
234 struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
238 txphdr = ip6hdr + sizeof(struct ipv6hdr);
240 if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
241 csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
247 rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
248 struct rmnet_map_ul_csum_header *ul_header,
251 __be16 *hdr = (__be16 *)ul_header, offset;
253 offset = htons((__force u16)(skb_transport_header(skb) -
254 (unsigned char *)ip6hdr));
255 ul_header->csum_start_offset = offset;
256 ul_header->csum_insert_offset = skb->csum_offset;
257 ul_header->csum_enabled = 1;
258 ul_header->udp_ip4_ind = 0;
260 /* Changing remaining fields to network order */
262 *hdr = htons((__force u16)*hdr);
264 skb->ip_summed = CHECKSUM_NONE;
266 rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
270 /* Adds MAP header to front of skb->data
271 * Padding is calculated and set appropriately in MAP header. Mux ID is
274 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
277 struct rmnet_map_header *map_header;
278 u32 padding, map_datalen;
281 map_datalen = skb->len - hdrlen;
282 map_header = (struct rmnet_map_header *)
283 skb_push(skb, sizeof(struct rmnet_map_header));
284 memset(map_header, 0, sizeof(struct rmnet_map_header));
286 if (pad == RMNET_MAP_NO_PAD_BYTES) {
287 map_header->pkt_len = htons(map_datalen);
291 padding = ALIGN(map_datalen, 4) - map_datalen;
296 if (skb_tailroom(skb) < padding)
299 padbytes = (u8 *)skb_put(skb, padding);
300 memset(padbytes, 0, padding);
303 map_header->pkt_len = htons(map_datalen + padding);
304 map_header->pad_len = padding & 0x3F;
309 /* Deaggregates a single packet
310 * A whole new buffer is allocated for each portion of an aggregated frame.
311 * Caller should keep calling deaggregate() on the source skb until 0 is
312 * returned, indicating that there are no more packets to deaggregate. Caller
313 * is responsible for freeing the original skb.
315 struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
316 struct rmnet_port *port)
318 struct rmnet_map_header *maph;
319 struct sk_buff *skbn;
325 maph = (struct rmnet_map_header *)skb->data;
326 packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
328 if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
329 packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
331 if (((int)skb->len - (int)packet_len) < 0)
334 /* Some hardware can send us empty frames. Catch them */
335 if (ntohs(maph->pkt_len) == 0)
338 skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
342 skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
343 skb_put(skbn, packet_len);
344 memcpy(skbn->data, skb->data, packet_len);
345 skb_pull(skb, packet_len);
350 /* Validates packet checksums. Function takes a pointer to
351 * the beginning of a buffer which contains the IP payload +
352 * padding + checksum trailer.
353 * Only IPv4 and IPv6 are supported along with TCP & UDP.
354 * Fragmented or tunneled packets are not supported.
356 int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
358 struct rmnet_priv *priv = netdev_priv(skb->dev);
359 struct rmnet_map_dl_csum_trailer *csum_trailer;
361 if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
362 priv->stats.csum_sw++;
366 csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
368 if (!csum_trailer->valid) {
369 priv->stats.csum_valid_unset++;
373 if (skb->protocol == htons(ETH_P_IP)) {
374 return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
375 } else if (skb->protocol == htons(ETH_P_IPV6)) {
376 #if IS_ENABLED(CONFIG_IPV6)
377 return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
379 priv->stats.csum_err_invalid_ip_version++;
380 return -EPROTONOSUPPORT;
383 priv->stats.csum_err_invalid_ip_version++;
384 return -EPROTONOSUPPORT;
390 /* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
391 * packets that are supported for UL checksum offload.
393 void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
394 struct net_device *orig_dev)
396 struct rmnet_priv *priv = netdev_priv(orig_dev);
397 struct rmnet_map_ul_csum_header *ul_header;
400 ul_header = (struct rmnet_map_ul_csum_header *)
401 skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
403 if (unlikely(!(orig_dev->features &
404 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
407 if (skb->ip_summed == CHECKSUM_PARTIAL) {
408 iphdr = (char *)ul_header +
409 sizeof(struct rmnet_map_ul_csum_header);
411 if (skb->protocol == htons(ETH_P_IP)) {
412 rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
414 } else if (skb->protocol == htons(ETH_P_IPV6)) {
415 #if IS_ENABLED(CONFIG_IPV6)
416 rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
419 priv->stats.csum_err_invalid_ip_version++;
423 priv->stats.csum_err_invalid_ip_version++;
428 ul_header->csum_start_offset = 0;
429 ul_header->csum_insert_offset = 0;
430 ul_header->csum_enabled = 0;
431 ul_header->udp_ip4_ind = 0;
433 priv->stats.csum_sw++;