drivers/crypto/chelsio/chtls/chtls_main.c
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/hash.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

#define DRV_NAME "chtls"

/*
 * chtls device management
 * maintains a list of the chtls devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
static struct proto chtls_cpl_prot;
struct request_sock_ops chtls_rsk_ops;
static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;

static void register_listen_notifier(struct notifier_block *nb)
{
        mutex_lock(&notify_mutex);
        raw_notifier_chain_register(&listen_notify_list, nb);
        mutex_unlock(&notify_mutex);
}

static void unregister_listen_notifier(struct notifier_block *nb)
{
        mutex_lock(&notify_mutex);
        raw_notifier_chain_unregister(&listen_notify_list, nb);
        mutex_unlock(&notify_mutex);
}

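/*
 * Notifier callback: dispatch CHTLS_LISTEN_START/STOP events to the
 * listen start/stop handlers and free the chtls_listen request that
 * the caller allocated.
 */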
static int listen_notify_handler(struct notifier_block *this,
                                 unsigned long event, void *data)
{
        struct chtls_listen *clisten;
        int ret = NOTIFY_DONE;

        clisten = (struct chtls_listen *)data;

        switch (event) {
        case CHTLS_LISTEN_START:
                ret = chtls_listen_start(clisten->cdev, clisten->sk);
                kfree(clisten);
                break;
        case CHTLS_LISTEN_STOP:
                chtls_listen_stop(clisten->cdev, clisten->sk);
                kfree(clisten);
                break;
        }
        return ret;
}

static struct notifier_block listen_notifier = {
        .notifier_call = listen_notify_handler
};

static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb_transport_header(skb) != skb_network_header(skb)))
                return tcp_v4_do_rcv(sk, skb);
        BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
        return 0;
}

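/*
 * Offload a listening socket to the adapter. Only TCP sockets qualify,
 * and loopback listeners are not offloaded; the request is handed to
 * the notifier chain so it runs under notify_mutex.
 */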
static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
        struct chtls_listen *clisten;
        int err;

        if (sk->sk_protocol != IPPROTO_TCP)
                return -EPROTONOSUPPORT;

        if (sk->sk_family == PF_INET &&
            LOOPBACK(inet_sk(sk)->inet_rcv_saddr))
                return -EADDRNOTAVAIL;

        sk->sk_backlog_rcv = listen_backlog_rcv;
        clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
        if (!clisten)
                return -ENOMEM;
        clisten->cdev = cdev;
        clisten->sk = sk;
        mutex_lock(&notify_mutex);
        err = raw_notifier_call_chain(&listen_notify_list,
                                      CHTLS_LISTEN_START, clisten);
        mutex_unlock(&notify_mutex);
        return err;
}

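/*
 * Remove a listening socket from the adapter via the notifier chain.
 * Mirrors chtls_start_listen(); the chtls_listen request is freed by
 * the notifier handler.
 */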
static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
{
        struct chtls_listen *clisten;

        if (sk->sk_protocol != IPPROTO_TCP)
                return;

        clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
        if (!clisten)
                return;
        clisten->cdev = cdev;
        clisten->sk = sk;
        mutex_lock(&notify_mutex);
        raw_notifier_call_chain(&listen_notify_list,
                                CHTLS_LISTEN_STOP, clisten);
        mutex_unlock(&notify_mutex);
}

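/*
 * Report whether any port on this adapter advertises inline TLS record
 * offload (NETIF_F_HW_TLS_RECORD).
 */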
static int chtls_inline_feature(struct tls_device *dev)
{
        struct net_device *netdev;
        struct chtls_dev *cdev;
        int i;

        cdev = to_chtls_dev(dev);

        for (i = 0; i < cdev->lldi->nports; i++) {
                netdev = cdev->ports[i];
                if (netdev->features & NETIF_F_HW_TLS_RECORD)
                        return 1;
        }
        return 0;
}

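/*
 * tls_device hash/unhash hooks: start or stop listen offload when a
 * listening socket enters or leaves the TLS subsystem.
 */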
static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
{
        struct chtls_dev *cdev = to_chtls_dev(dev);

        if (sk->sk_state == TCP_LISTEN)
                return chtls_start_listen(cdev, sk);
        return 0;
}

static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
{
        struct chtls_dev *cdev = to_chtls_dev(dev);

        if (sk->sk_state == TCP_LISTEN)
                chtls_stop_listen(cdev, sk);
}

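/*
 * Release all resources owned by a chtls device: the key-map area, the
 * hw TID idr, the cached response-queue skbs, and the device itself.
 */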
static void chtls_free_uld(struct chtls_dev *cdev)
{
        int i;

        tls_unregister_device(&cdev->tlsdev);
        kvfree(cdev->kmap.addr);
        idr_destroy(&cdev->hwtid_idr);
        for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
                kfree_skb(cdev->rspq_skb_cache[i]);
        kfree(cdev->lldi);
        kfree_skb(cdev->askb);
        kfree(cdev);
}

static inline void chtls_dev_release(struct kref *kref)
{
        struct chtls_dev *cdev;
        struct tls_device *dev;

        dev = container_of(kref, struct tls_device, kref);
        cdev = to_chtls_dev(dev);
        chtls_free_uld(cdev);
}

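/*
 * Register this adapter with the TLS subsystem under the name
 * "chtls<port0>" and wire up the tls_device callbacks.
 */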
static void chtls_register_dev(struct chtls_dev *cdev)
{
        struct tls_device *tlsdev = &cdev->tlsdev;

        strlcpy(tlsdev->name, "chtls", TLS_DEVICE_NAME_MAX);
        strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
                TLS_DEVICE_NAME_MAX);
        tlsdev->feature = chtls_inline_feature;
        tlsdev->hash = chtls_create_hash;
        tlsdev->unhash = chtls_destroy_hash;
        tlsdev->release = chtls_dev_release;
        kref_init(&tlsdev->kref);
        tls_register_device(tlsdev);
        cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}

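/*
 * Work-queue handler: drain the deferred-skb queue, calling each skb's
 * deferred handler with the queue lock dropped around the callback.
 */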
static void process_deferq(struct work_struct *task_param)
{
        struct chtls_dev *cdev = container_of(task_param,
                                struct chtls_dev, deferq_task);
        struct sk_buff *skb;

        spin_lock_bh(&cdev->deferq.lock);
        while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
                spin_unlock_bh(&cdev->deferq.lock);
                DEFERRED_SKB_CB(skb)->handler(cdev, skb);
                spin_lock_bh(&cdev->deferq.lock);
        }
        spin_unlock_bh(&cdev->deferq.lock);
}

static int chtls_get_skb(struct chtls_dev *cdev)
{
        cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
        if (!cdev->askb)
                return -ENOMEM;

        skb_put(cdev->askb, sizeof(struct tcphdr));
        skb_reset_transport_header(cdev->askb);
        memset(cdev->askb->data, 0, cdev->askb->len);
        return 0;
}

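/*
 * ULD add callback: allocate and initialize a chtls device for a new
 * adapter, copying the lower-level driver info and pre-allocating one
 * cached skb per response-queue hash bin.
 */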
static void *chtls_uld_add(const struct cxgb4_lld_info *info)
{
        struct cxgb4_lld_info *lldi;
        struct chtls_dev *cdev;
        int i, j;

        cdev = kzalloc(sizeof(*cdev) + info->nports *
                      (sizeof(struct net_device *)), GFP_KERNEL);
        if (!cdev)
                goto out;

        lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);
        if (!lldi)
                goto out_lldi;

        if (chtls_get_skb(cdev))
                goto out_skb;

        *lldi = *info;
        cdev->lldi = lldi;
        cdev->pdev = lldi->pdev;
        cdev->tids = lldi->tids;
        cdev->ports = lldi->ports;
        cdev->mtus = lldi->mtus;
        cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
                        << FW_VIID_PFN_S;

        for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) {
                unsigned int size = 64 - sizeof(struct rsp_ctrl) - 8;

                cdev->rspq_skb_cache[i] = __alloc_skb(size,
                                                      gfp_any(), 0,
                                                      lldi->nodeid);
                if (unlikely(!cdev->rspq_skb_cache[i]))
                        goto out_rspq_skb;
        }

        idr_init(&cdev->hwtid_idr);
        INIT_WORK(&cdev->deferq_task, process_deferq);
        spin_lock_init(&cdev->listen_lock);
        spin_lock_init(&cdev->idr_lock);
        cdev->send_page_order = min_t(uint, get_order(32768),
                                      send_page_order);
        cdev->max_host_sndbuf = 48 * 1024;

        if (lldi->vr->key.size)
                if (chtls_init_kmap(cdev, lldi))
                        goto out_rspq_skb;

        mutex_lock(&cdev_mutex);
        list_add_tail(&cdev->list, &cdev_list);
        mutex_unlock(&cdev_mutex);

        return cdev;
out_rspq_skb:
        for (j = 0; j < i; j++)
                kfree_skb(cdev->rspq_skb_cache[j]);
        kfree_skb(cdev->askb);
out_skb:
        kfree(lldi);
out_lldi:
        kfree(cdev);
out:
        return NULL;
}

static void chtls_free_all_uld(void)
{
        struct chtls_dev *cdev, *tmp;

        mutex_lock(&cdev_mutex);
        list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
                if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
                        list_del(&cdev->list);
                        kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
                }
        }
        mutex_unlock(&cdev_mutex);
}

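/*
 * ULD state-change callback: register the TLS device when the adapter
 * comes up and drop our device reference when it detaches.
 */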
static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
        struct chtls_dev *cdev = handle;

        switch (new_state) {
        case CXGB4_STATE_UP:
                chtls_register_dev(cdev);
                break;
        case CXGB4_STATE_DOWN:
                break;
        case CXGB4_STATE_START_RECOVERY:
                break;
        case CXGB4_STATE_DETACH:
                mutex_lock(&cdev_mutex);
                list_del(&cdev->list);
                mutex_unlock(&cdev_mutex);
                kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
                break;
        default:
                break;
        }
        return 0;
}

static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
                                          const __be64 *rsp,
                                          u32 pktshift)
{
        struct sk_buff *skb;

        /* Allocate space for cpl_pass_accept_req, which will be synthesized
         * by the driver. Once the driver synthesizes cpl_pass_accept_req,
         * the skb will go through the regular cpl_pass_accept_req
         * processing in TOM.
         */
        skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
                        - pktshift, GFP_ATOMIC);
        if (unlikely(!skb))
                return NULL;
        __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
                   - pktshift);
        /* For now we will copy cpl_rx_pkt into the skb */
        skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_rx_pkt));
        skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_pass_accept_req),
                                       gl->va + pktshift,
                                       gl->tot_len - pktshift);

        return skb;
}

static int chtls_recv_packet(struct chtls_dev *cdev,
                             const struct pkt_gl *gl, const __be64 *rsp)
{
        unsigned int opcode = *(u8 *)rsp;
        struct sk_buff *skb;
        int ret;

        skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
        if (!skb)
                return -ENOMEM;

        ret = chtls_handlers[opcode](cdev, skb);
        if (ret & CPL_RET_BUF_DONE)
                kfree_skb(skb);

        return 0;
}

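/*
 * Deliver a CPL response that arrived without a packet gather list.
 * Reuse the per-bin cached skb when it is exclusively ours (users == 2
 * after our increment, neither shared nor cloned); otherwise fall back
 * to a fresh atomic allocation.
 */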
static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp)
{
        unsigned long rspq_bin;
        unsigned int opcode;
        struct sk_buff *skb;
        unsigned int len;
        int ret;

        len = 64 - sizeof(struct rsp_ctrl) - 8;
        opcode = *(u8 *)rsp;

        rspq_bin = hash_ptr((void *)rsp, RSPQ_HASH_BITS);
        skb = cdev->rspq_skb_cache[rspq_bin];
        if (skb && !skb_is_nonlinear(skb) &&
            !skb_shared(skb) && !skb_cloned(skb)) {
                refcount_inc(&skb->users);
                if (refcount_read(&skb->users) == 2) {
                        __skb_trim(skb, 0);
                        if (skb_tailroom(skb) >= len)
                                goto copy_out;
                }
                refcount_dec(&skb->users);
        }
        skb = alloc_skb(len, GFP_ATOMIC);
        if (unlikely(!skb))
                return -ENOMEM;

copy_out:
        __skb_put(skb, len);
        skb_copy_to_linear_data(skb, rsp, len);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        ret = chtls_handlers[opcode](cdev, skb);

        if (ret & CPL_RET_BUF_DONE)
                kfree_skb(skb);
        return 0;
}

static void chtls_recv(struct chtls_dev *cdev,
                       struct sk_buff **skbs, const __be64 *rsp)
{
        struct sk_buff *skb = *skbs;
        unsigned int opcode;
        int ret;

        opcode = *(u8 *)rsp;

        __skb_push(skb, sizeof(struct rss_header));
        skb_copy_to_linear_data(skb, rsp, sizeof(struct rss_header));

        ret = chtls_handlers[opcode](cdev, skb);
        if (ret & CPL_RET_BUF_DONE)
                kfree_skb(skb);
}

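/*
 * ULD rx callback: CPL_RX_PKT messages are copied into a synthesized
 * pass-accept skb, responses without a gather list go through
 * chtls_recv_rsp(), and everything else is converted to an skb and
 * dispatched by opcode.
 */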
static int chtls_uld_rx_handler(void *handle, const __be64 *rsp,
                                const struct pkt_gl *gl)
{
        struct chtls_dev *cdev = handle;
        unsigned int opcode;
        struct sk_buff *skb;

        opcode = *(u8 *)rsp;

        if (unlikely(opcode == CPL_RX_PKT)) {
                if (chtls_recv_packet(cdev, gl, rsp) < 0)
                        goto nomem;
                return 0;
        }

        if (!gl)
                return chtls_recv_rsp(cdev, rsp);

#define RX_PULL_LEN 128
        skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
        if (unlikely(!skb))
                goto nomem;
        chtls_recv(cdev, &skb, rsp);
        return 0;

nomem:
        return -ENOMEM;
}

static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
                               int __user *optlen)
{
        struct tls_crypto_info crypto_info = { 0 };

        crypto_info.version = TLS_1_2_VERSION;
        if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info)))
                return -EFAULT;
        return 0;
}

static int chtls_getsockopt(struct sock *sk, int level, int optname,
                            char __user *optval, int __user *optlen)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (level != SOL_TLS)
                return ctx->getsockopt(sk, level, optname, optval, optlen);

        return do_chtls_getsockopt(sk, optval, optlen);
}

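/*
 * Validate TLS crypto parameters from user space. Only TLS 1.2 with
 * AES-GCM-128 is accepted; the crypto_info is copied in two steps so
 * the version and cipher can be checked before the key material, which
 * is then handed to chtls_setkey().
 */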
static int do_chtls_setsockopt(struct sock *sk, int optname,
                               char __user *optval, unsigned int optlen)
{
        struct tls_crypto_info *crypto_info, tmp_crypto_info;
        struct chtls_sock *csk;
        int keylen;
        int rc = 0;

        csk = rcu_dereference_sk_user_data(sk);

        if (!optval || optlen < sizeof(*crypto_info)) {
                rc = -EINVAL;
                goto out;
        }

        rc = copy_from_user(&tmp_crypto_info, optval, sizeof(*crypto_info));
        if (rc) {
                rc = -EFAULT;
                goto out;
        }

        /* check version */
        if (tmp_crypto_info.version != TLS_1_2_VERSION) {
                rc = -ENOTSUPP;
                goto out;
        }

        crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;

        switch (tmp_crypto_info.cipher_type) {
        case TLS_CIPHER_AES_GCM_128: {
                /* Obtain version and type from previous copy */
                crypto_info[0] = tmp_crypto_info;
                /* Now copy the following data */
                rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
                                optval + sizeof(*crypto_info),
                                sizeof(struct tls12_crypto_info_aes_gcm_128)
                                - sizeof(*crypto_info));

                if (rc) {
                        rc = -EFAULT;
                        goto out;
                }

                keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
                rc = chtls_setkey(csk, keylen, optname);
                break;
        }
        default:
                rc = -EINVAL;
                goto out;
        }
out:
        return rc;
}

static int chtls_setsockopt(struct sock *sk, int level, int optname,
                            char __user *optval, unsigned int optlen)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (level != SOL_TLS)
                return ctx->setsockopt(sk, level, optname, optval, optlen);

        return do_chtls_setsockopt(sk, optname, optval, optlen);
}

static struct cxgb4_uld_info chtls_uld_info = {
        .name = DRV_NAME,
        .nrxq = MAX_ULD_QSETS,
        .ntxq = MAX_ULD_QSETS,
        .rxq_size = 1024,
        .add = chtls_uld_add,
        .state_change = chtls_uld_state_change,
        .rx_handler = chtls_uld_rx_handler,
};

void chtls_install_cpl_ops(struct sock *sk)
{
        sk->sk_prot = &chtls_cpl_prot;
}

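/*
 * Clone tcp_prot and override the entry points that must go through
 * chtls so offloaded sockets use the CPL-aware implementations.
 */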
static void __init chtls_init_ulp_ops(void)
{
        chtls_cpl_prot                  = tcp_prot;
        chtls_init_rsk_ops(&chtls_cpl_prot, &chtls_rsk_ops,
                           &tcp_prot, PF_INET);
        chtls_cpl_prot.close            = chtls_close;
        chtls_cpl_prot.disconnect       = chtls_disconnect;
        chtls_cpl_prot.destroy          = chtls_destroy_sock;
        chtls_cpl_prot.shutdown         = chtls_shutdown;
        chtls_cpl_prot.sendmsg          = chtls_sendmsg;
        chtls_cpl_prot.sendpage         = chtls_sendpage;
        chtls_cpl_prot.recvmsg          = chtls_recvmsg;
        chtls_cpl_prot.setsockopt       = chtls_setsockopt;
        chtls_cpl_prot.getsockopt       = chtls_getsockopt;
}

static int __init chtls_register(void)
{
        chtls_init_ulp_ops();
        register_listen_notifier(&listen_notifier);
        cxgb4_register_uld(CXGB4_ULD_TLS, &chtls_uld_info);
        return 0;
}

static void __exit chtls_unregister(void)
{
        unregister_listen_notifier(&listen_notifier);
        chtls_free_all_uld();
        cxgb4_unregister_uld(CXGB4_ULD_TLS);
}

module_init(chtls_register);
module_exit(chtls_unregister);

MODULE_DESCRIPTION("Chelsio TLS Inline driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);