// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
 */

#include <linux/crc32.h>
#include <linux/base64.h>
#include <linux/prandom.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/dh.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>

#define CHAP_BUF_SIZE 4096
static struct kmem_cache *nvme_chap_buf_cache;
static mempool_t *nvme_chap_buf_pool;

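/*
 * Per-queue DH-HMAC-CHAP negotiation state. One context is allocated per
 * possible queue in nvme_auth_init_ctrl(); auth_work runs the complete
 * exchange for that queue. c1/c2 hold the controller and host challenges,
 * ctrl_key/host_key the exchanged DH public values, and sess_key the
 * derived shared secret used to augment the challenges.
 */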
struct nvme_dhchap_queue_context {
        struct list_head entry;
        struct work_struct auth_work;
        struct nvme_ctrl *ctrl;
        struct crypto_shash *shash_tfm;
        struct crypto_kpp *dh_tfm;
        void *buf;
        int qid;
        int error;
        u32 s1;
        u32 s2;
        u16 transaction;
        u8 status;
        u8 hash_id;
        size_t hash_len;
        u8 dhgroup_id;
        u8 c1[64];
        u8 c2[64];
        u8 response[64];
        u8 *host_response;
        u8 *ctrl_key;
        int ctrl_key_len;
        u8 *host_key;
        int host_key_len;
        u8 *sess_key;
        int sess_key_len;
};

static struct workqueue_struct *nvme_auth_wq;

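/*
 * Authentication commands for the admin queue (qid 0) are sent on the
 * fabrics queue as ordinary blocking requests; commands for I/O queues go
 * through the connect queue using reserved, non-waiting requests.
 */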
#define nvme_auth_flags_from_qid(qid) \
        (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
#define nvme_auth_queue_from_qid(ctrl, qid) \
        (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q

static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
{
        return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
                        ctrl->opts->nr_poll_queues + 1;
}

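/*
 * Transfer one DH-HMAC-CHAP message on @qid: an AUTH_Send command when
 * @auth_send is true, an AUTH_Receive command otherwise. SECP carries the
 * DH-HMAC-CHAP protocol identifier and SPSP0/SPSP1 are set to 0x01.
 */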
static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
                            void *data, size_t data_len, bool auth_send)
{
        struct nvme_command cmd = {};
        blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
        struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
        int ret;

        cmd.auth_common.opcode = nvme_fabrics_command;
        cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
        cmd.auth_common.spsp0 = 0x01;
        cmd.auth_common.spsp1 = 0x01;
        if (auth_send) {
                cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
                cmd.auth_send.tl = cpu_to_le32(data_len);
        } else {
                cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
                cmd.auth_receive.al = cpu_to_le32(data_len);
        }

        ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
                                     qid == 0 ? NVME_QID_ANY : qid,
                                     0, flags);
        if (ret > 0)
                dev_warn(ctrl->device,
                        "qid %d auth_send failed with status %d\n", qid, ret);
        else if (ret < 0)
                dev_err(ctrl->device,
                        "qid %d auth_send failed with error %d\n", qid, ret);
        return ret;
}

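/*
 * Validate a received message: an AUTH_Failure1 message is mapped to its
 * reason code, and anything other than the expected DH-HMAC-CHAP message
 * for the current transaction is rejected.
 */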
static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
                struct nvmf_auth_dhchap_failure_data *data,
                u16 transaction, u8 expected_msg)
{
        dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
                __func__, qid, data->auth_type, data->auth_id);

        if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
            data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
                return data->rescode_exp;
        }
        if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
            data->auth_id != expected_msg) {
                dev_warn(ctrl->device,
                         "qid %d invalid message %02x/%02x\n",
                         qid, data->auth_type, data->auth_id);
                return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
        }
        if (le16_to_cpu(data->t_id) != transaction) {
                dev_warn(ctrl->device,
                         "qid %d invalid transaction ID %d\n",
                         qid, le16_to_cpu(data->t_id));
                return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
        }
        return 0;
}

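/*
 * Build the AUTH_Negotiate payload: a single protocol descriptor offering
 * SHA-256/384/512 and the NULL plus ffdhe2048..ffdhe8192 DH groups. The
 * DH group identifiers are placed starting at offset 30 of idlist, hence
 * the fixed indices below.
 */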
static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
                struct nvme_dhchap_queue_context *chap)
{
        struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
        size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);

        if (size > CHAP_BUF_SIZE) {
                chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
                return -EINVAL;
        }
        memset((u8 *)chap->buf, 0, size);
        data->auth_type = NVME_AUTH_COMMON_MESSAGES;
        data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
        data->t_id = cpu_to_le16(chap->transaction);
        data->sc_c = 0; /* No secure channel concatenation */
        data->napd = 1;
        data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
        data->auth_protocol[0].dhchap.halen = 3;
        data->auth_protocol[0].dhchap.dhlen = 6;
        data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
        data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
        data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
        data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
        data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
        data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
        data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
        data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
        data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;

        return size;
}

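/*
 * Parse the AUTH_Challenge message: select (or reuse) the HMAC transform
 * and DH group chosen by the controller, stash the controller challenge in
 * c1, and copy the controller's DH public value, if any, into ctrl_key for
 * the later shared-secret computation.
 */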
static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
                struct nvme_dhchap_queue_context *chap)
{
        struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
        u16 dhvlen = le16_to_cpu(data->dhvlen);
        size_t size = sizeof(*data) + data->hl + dhvlen;
        const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
        const char *hmac_name, *kpp_name;

        if (size > CHAP_BUF_SIZE) {
                chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
                return -EINVAL;
        }

        hmac_name = nvme_auth_hmac_name(data->hashid);
        if (!hmac_name) {
                dev_warn(ctrl->device,
                         "qid %d: invalid HASH ID %d\n",
                         chap->qid, data->hashid);
                chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
                return -EPROTO;
        }

        if (chap->hash_id == data->hashid && chap->shash_tfm &&
            !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
            crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
                dev_dbg(ctrl->device,
                        "qid %d: reuse existing hash %s\n",
                        chap->qid, hmac_name);
                goto select_kpp;
        }

        /* Reset if hash cannot be reused */
        if (chap->shash_tfm) {
                crypto_free_shash(chap->shash_tfm);
                chap->hash_id = 0;
                chap->hash_len = 0;
        }
        chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
                                             CRYPTO_ALG_ALLOCATES_MEMORY);
        if (IS_ERR(chap->shash_tfm)) {
                dev_warn(ctrl->device,
                         "qid %d: failed to allocate hash %s, error %ld\n",
                         chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
                chap->shash_tfm = NULL;
                chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
                return -ENOMEM;
        }

        if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
                dev_warn(ctrl->device,
                         "qid %d: invalid hash length %d\n",
                         chap->qid, data->hl);
                crypto_free_shash(chap->shash_tfm);
                chap->shash_tfm = NULL;
                chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
                return -EPROTO;
        }

        chap->hash_id = data->hashid;
        chap->hash_len = data->hl;
        dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
                chap->qid, hmac_name);

select_kpp:
        kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
        if (!kpp_name) {
                dev_warn(ctrl->device,
                         "qid %d: invalid DH group id %d\n",
                         chap->qid, data->dhgid);
                chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
                /* Leave previous dh_tfm intact */
                return -EPROTO;
        }

        if (chap->dhgroup_id == data->dhgid &&
            (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
                dev_dbg(ctrl->device,
                        "qid %d: reuse existing DH group %s\n",
                        chap->qid, gid_name);
                goto skip_kpp;
        }

        /* Reset dh_tfm if it can't be reused */
        if (chap->dh_tfm) {
                crypto_free_kpp(chap->dh_tfm);
                chap->dh_tfm = NULL;
        }

        if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
                if (dhvlen == 0) {
                        dev_warn(ctrl->device,
                                 "qid %d: empty DH value\n",
                                 chap->qid);
                        chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
                        return -EPROTO;
                }

                chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
                if (IS_ERR(chap->dh_tfm)) {
                        int ret = PTR_ERR(chap->dh_tfm);

                        dev_warn(ctrl->device,
                                 "qid %d: error %d initializing DH group %s\n",
                                 chap->qid, ret, gid_name);
                        chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
                        chap->dh_tfm = NULL;
                        return ret;
                }
                dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
                        chap->qid, gid_name);
        } else if (dhvlen != 0) {
                dev_warn(ctrl->device,
                         "qid %d: invalid DH value for NULL DH\n",
                         chap->qid);
                chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
                return -EPROTO;
        }
        chap->dhgroup_id = data->dhgid;

skip_kpp:
        chap->s1 = le32_to_cpu(data->seqnum);
        memcpy(chap->c1, data->cval, chap->hash_len);
        if (dhvlen) {
                chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
                if (!chap->ctrl_key) {
                        chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
                        return -ENOMEM;
                }
                chap->ctrl_key_len = dhvlen;
                memcpy(chap->ctrl_key, data->cval + chap->hash_len,
                       dhvlen);
                dev_dbg(ctrl->device, "ctrl public key %*ph\n",
                         (int)chap->ctrl_key_len, chap->ctrl_key);
        }

        return 0;
}

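/*
 * Build the AUTH_Reply payload. rval carries the host response, followed
 * by the host challenge c2 when bidirectional authentication is requested
 * (a controller key is configured), followed by the host DH public value
 * when a DH group other than NULL was negotiated.
 */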
static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
                struct nvme_dhchap_queue_context *chap)
{
        struct nvmf_auth_dhchap_reply_data *data = chap->buf;
        size_t size = sizeof(*data);

        size += 2 * chap->hash_len;

        if (chap->host_key_len)
                size += chap->host_key_len;

        if (size > CHAP_BUF_SIZE) {
                chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
                return -EINVAL;
        }

        memset(chap->buf, 0, size);
        data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
        data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
        data->t_id = cpu_to_le16(chap->transaction);
        data->hl = chap->hash_len;
        data->dhvlen = cpu_to_le16(chap->host_key_len);
        memcpy(data->rval, chap->response, chap->hash_len);
        if (ctrl->ctrl_key) {
                get_random_bytes(chap->c2, chap->hash_len);
                data->cvalid = 1;
                chap->s2 = nvme_auth_get_seqnum();
                memcpy(data->rval + chap->hash_len, chap->c2,
                       chap->hash_len);
                dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
                        __func__, chap->qid, (int)chap->hash_len, chap->c2);
        } else {
                memset(chap->c2, 0, chap->hash_len);
                chap->s2 = 0;
        }
        data->seqnum = cpu_to_le32(chap->s2);
        if (chap->host_key_len) {
                dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
                        __func__, chap->qid,
                        chap->host_key_len, chap->host_key);
                memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
                       chap->host_key_len);
        }

        return size;
}

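/*
 * Process the AUTH_Success1 message. If the controller included a response
 * (rvalid is set), compare it against the value precomputed by
 * nvme_auth_dhchap_setup_ctrl_response(); a mismatch fails bidirectional
 * authentication.
 */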
static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
                struct nvme_dhchap_queue_context *chap)
{
        struct nvmf_auth_dhchap_success1_data *data = chap->buf;
        size_t size = sizeof(*data);

        if (chap->ctrl_key)
                size += chap->hash_len;

        if (size > CHAP_BUF_SIZE) {
                chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
                return -EINVAL;
        }

        if (data->hl != chap->hash_len) {
                dev_warn(ctrl->device,
                         "qid %d: invalid hash length %u\n",
                         chap->qid, data->hl);
                chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
                return -EPROTO;
        }

        /* Just print out information for the admin queue */
        if (chap->qid == 0)
                dev_info(ctrl->device,
                         "qid 0: authenticated with hash %s dhgroup %s\n",
                         nvme_auth_hmac_name(chap->hash_id),
                         nvme_auth_dhgroup_name(chap->dhgroup_id));

        if (!data->rvalid)
                return 0;

        /* Validate controller response */
        if (memcmp(chap->response, data->rval, data->hl)) {
                dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
                        __func__, chap->qid, (int)chap->hash_len, data->rval);
                dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
                        __func__, chap->qid, (int)chap->hash_len,
                        chap->response);
                dev_warn(ctrl->device,
                         "qid %d: controller authentication failed\n",
                         chap->qid);
                chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
                return -ECONNREFUSED;
        }

        /* Just print out information for the admin queue */
        if (chap->qid == 0)
                dev_info(ctrl->device,
                         "qid 0: controller authenticated\n");
        return 0;
}

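/* Build the AUTH_Success2 payload concluding bidirectional authentication. */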
static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
                struct nvme_dhchap_queue_context *chap)
{
        struct nvmf_auth_dhchap_success2_data *data = chap->buf;
        size_t size = sizeof(*data);

        memset(chap->buf, 0, size);
        data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
        data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
        data->t_id = cpu_to_le16(chap->transaction);

        return size;
}

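/* Build the AUTH_Failure2 payload carrying the reason code from chap->status. */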
static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
                struct nvme_dhchap_queue_context *chap)
{
        struct nvmf_auth_dhchap_failure_data *data = chap->buf;
        size_t size = sizeof(*data);

        memset(chap->buf, 0, size);
        data->auth_type = NVME_AUTH_COMMON_MESSAGES;
        data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
        data->t_id = cpu_to_le16(chap->transaction);
        data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
        data->rescode_exp = chap->status;

        return size;
}

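/*
 * Compute the host response:
 *
 *   R1 = HMAC(transform(host_key, hostnqn),
 *             C1 || le32(s1) || le16(transaction) || 0x00 ||
 *             "HostHost" || hostnqn || 0x00 || subsysnqn)
 *
 * where C1 is the controller challenge, augmented with the DH session key
 * for non-NULL DH groups. The transformed secret is cached in
 * chap->host_response so it can be reused on renegotiation.
 */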
static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
                struct nvme_dhchap_queue_context *chap)
{
        SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
        u8 buf[4], *challenge = chap->c1;
        int ret;

        dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
                __func__, chap->qid, chap->s1, chap->transaction);

        if (!chap->host_response) {
                chap->host_response = nvme_auth_transform_key(ctrl->host_key,
                                                ctrl->opts->host->nqn);
                if (IS_ERR(chap->host_response)) {
                        ret = PTR_ERR(chap->host_response);
                        chap->host_response = NULL;
                        return ret;
                }
        } else {
                dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
                        __func__, chap->qid);
        }

        ret = crypto_shash_setkey(chap->shash_tfm,
                        chap->host_response, ctrl->host_key->len);
        if (ret) {
                dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
                         chap->qid, ret);
                goto out;
        }

        if (chap->dh_tfm) {
                challenge = kmalloc(chap->hash_len, GFP_KERNEL);
                if (!challenge) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = nvme_auth_augmented_challenge(chap->hash_id,
                                                    chap->sess_key,
                                                    chap->sess_key_len,
                                                    chap->c1, challenge,
                                                    chap->hash_len);
                if (ret)
                        goto out;
        }

        shash->tfm = chap->shash_tfm;
        ret = crypto_shash_init(shash);
        if (ret)
                goto out;
        ret = crypto_shash_update(shash, challenge, chap->hash_len);
        if (ret)
                goto out;
        put_unaligned_le32(chap->s1, buf);
        ret = crypto_shash_update(shash, buf, 4);
        if (ret)
                goto out;
        put_unaligned_le16(chap->transaction, buf);
        ret = crypto_shash_update(shash, buf, 2);
        if (ret)
                goto out;
        memset(buf, 0, sizeof(buf));
        ret = crypto_shash_update(shash, buf, 1);
        if (ret)
                goto out;
        ret = crypto_shash_update(shash, "HostHost", 8);
        if (ret)
                goto out;
        ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
                                  strlen(ctrl->opts->host->nqn));
        if (ret)
                goto out;
        ret = crypto_shash_update(shash, buf, 1);
        if (ret)
                goto out;
        ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
                            strlen(ctrl->opts->subsysnqn));
        if (ret)
                goto out;
        ret = crypto_shash_final(shash, chap->response);
out:
        if (challenge != chap->c1)
                kfree(challenge);
        return ret;
}

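/*
 * Compute the response expected from the controller for bidirectional
 * authentication:
 *
 *   R2 = HMAC(transform(ctrl_key, subsysnqn),
 *             C2 || le32(s2) || le16(transaction) || 0x00 ||
 *             "Controller" || subsysnqn || 0x00 || hostnqn)
 *
 * with C2 the host challenge, again augmented when a DH group is in use.
 */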
static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
                struct nvme_dhchap_queue_context *chap)
{
        SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
        u8 *ctrl_response;
        u8 buf[4], *challenge = chap->c2;
        int ret;

        ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
                                ctrl->opts->subsysnqn);
        if (IS_ERR(ctrl_response)) {
                ret = PTR_ERR(ctrl_response);
                return ret;
        }

        ret = crypto_shash_setkey(chap->shash_tfm,
                        ctrl_response, ctrl->ctrl_key->len);
        if (ret) {
                dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
                         chap->qid, ret);
                goto out;
        }

        if (chap->dh_tfm) {
                challenge = kmalloc(chap->hash_len, GFP_KERNEL);
                if (!challenge) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = nvme_auth_augmented_challenge(chap->hash_id,
                                                    chap->sess_key,
                                                    chap->sess_key_len,
                                                    chap->c2, challenge,
                                                    chap->hash_len);
                if (ret)
                        goto out;
        }
        dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
                __func__, chap->qid, chap->s2, chap->transaction);
        dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
                __func__, chap->qid, (int)chap->hash_len, challenge);
        dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
                __func__, chap->qid, ctrl->opts->subsysnqn);
        dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
                __func__, chap->qid, ctrl->opts->host->nqn);
        shash->tfm = chap->shash_tfm;
        ret = crypto_shash_init(shash);
        if (ret)
                goto out;
        ret = crypto_shash_update(shash, challenge, chap->hash_len);
        if (ret)
                goto out;
        put_unaligned_le32(chap->s2, buf);
        ret = crypto_shash_update(shash, buf, 4);
        if (ret)
                goto out;
        put_unaligned_le16(chap->transaction, buf);
        ret = crypto_shash_update(shash, buf, 2);
        if (ret)
                goto out;
        memset(buf, 0, 4);
        ret = crypto_shash_update(shash, buf, 1);
        if (ret)
                goto out;
        ret = crypto_shash_update(shash, "Controller", 10);
        if (ret)
                goto out;
        ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
                                  strlen(ctrl->opts->subsysnqn));
        if (ret)
                goto out;
        ret = crypto_shash_update(shash, buf, 1);
        if (ret)
                goto out;
        ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
                                  strlen(ctrl->opts->host->nqn));
        if (ret)
                goto out;
        ret = crypto_shash_final(shash, chap->response);
out:
        if (challenge != chap->c2)
                kfree(challenge);
        kfree(ctrl_response);
        return ret;
}

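/*
 * Run the DH exchange for the negotiated group: generate a private key
 * (unless a host key from a previous pass can be reused), derive the host
 * public value sent in the reply, and combine it with the controller's
 * public value to obtain the shared session key used to augment the
 * challenges.
 */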
static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
                struct nvme_dhchap_queue_context *chap)
{
        int ret;

        if (chap->host_key && chap->host_key_len) {
                dev_dbg(ctrl->device,
                        "qid %d: reusing host key\n", chap->qid);
                goto gen_sesskey;
        }
        ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
        if (ret < 0) {
                chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
                return ret;
        }

        chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);

        chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
        if (!chap->host_key) {
                chap->host_key_len = 0;
                chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
                return -ENOMEM;
        }
        ret = nvme_auth_gen_pubkey(chap->dh_tfm,
                                   chap->host_key, chap->host_key_len);
        if (ret) {
                dev_dbg(ctrl->device,
                        "failed to generate public key, error %d\n", ret);
                chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
                return ret;
        }

gen_sesskey:
        chap->sess_key_len = chap->host_key_len;
        chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
        if (!chap->sess_key) {
                chap->sess_key_len = 0;
                chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
                return -ENOMEM;
        }

        ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
                                          chap->ctrl_key, chap->ctrl_key_len,
                                          chap->sess_key, chap->sess_key_len);
        if (ret) {
                dev_dbg(ctrl->device,
                        "failed to generate shared secret, error %d\n", ret);
                chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
                return ret;
        }
        dev_dbg(ctrl->device, "shared secret %*ph\n",
                (int)chap->sess_key_len, chap->sess_key);
        return 0;
}

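/*
 * Scrub and release all per-negotiation material (responses, DH keys,
 * session key, challenges) so nothing sensitive lingers between
 * authentication attempts. The hash and KPP transforms are kept for reuse
 * and only released in nvme_auth_free_dhchap().
 */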
static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
{
        kfree_sensitive(chap->host_response);
        chap->host_response = NULL;
        kfree_sensitive(chap->host_key);
        chap->host_key = NULL;
        chap->host_key_len = 0;
        kfree_sensitive(chap->ctrl_key);
        chap->ctrl_key = NULL;
        chap->ctrl_key_len = 0;
        kfree_sensitive(chap->sess_key);
        chap->sess_key = NULL;
        chap->sess_key_len = 0;
        chap->status = 0;
        chap->error = 0;
        chap->s1 = 0;
        chap->s2 = 0;
        chap->transaction = 0;
        memset(chap->c1, 0, sizeof(chap->c1));
        memset(chap->c2, 0, sizeof(chap->c2));
        mempool_free(chap->buf, nvme_chap_buf_pool);
        chap->buf = NULL;
}

static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
{
        nvme_auth_reset_dhchap(chap);
        if (chap->shash_tfm)
                crypto_free_shash(chap->shash_tfm);
        if (chap->dh_tfm)
                crypto_free_kpp(chap->dh_tfm);
}

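/*
 * Work handler driving the full DH-HMAC-CHAP exchange for one queue:
 *
 *   1. send AUTH_Negotiate
 *   2. receive AUTH_Challenge, run the DH exponential and compute the
 *      host response
 *   3. send AUTH_Reply
 *   4. receive AUTH_Success1 and verify the controller response
 *   5. send AUTH_Success2 (bidirectional) or AUTH_Failure2 on error
 *
 * The result is left in chap->error for nvme_auth_wait() to pick up.
 */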
static void nvme_queue_auth_work(struct work_struct *work)
{
        struct nvme_dhchap_queue_context *chap =
                container_of(work, struct nvme_dhchap_queue_context, auth_work);
        struct nvme_ctrl *ctrl = chap->ctrl;
        size_t tl;
        int ret = 0;

        /*
         * Allocate a buffer large enough for the entire negotiation:
         * 4k is enough even for ffdhe8192.
         */
        chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL);
        if (!chap->buf) {
                chap->error = -ENOMEM;
                return;
        }

        chap->transaction = ctrl->transaction++;

        /* DH-HMAC-CHAP Step 1: send negotiate */
        dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
                __func__, chap->qid);
        ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
        if (ret < 0) {
                chap->error = ret;
                return;
        }
        tl = ret;
        ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
        if (ret) {
                chap->error = ret;
                return;
        }

        /* DH-HMAC-CHAP Step 2: receive challenge */
        dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
                __func__, chap->qid);

        memset(chap->buf, 0, CHAP_BUF_SIZE);
        ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
                               false);
        if (ret) {
                dev_warn(ctrl->device,
                         "qid %d failed to receive challenge, %s %d\n",
                         chap->qid, ret < 0 ? "error" : "nvme status", ret);
                chap->error = ret;
                return;
        }
        ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
                                         NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
        if (ret) {
                chap->status = ret;
                chap->error = -ECONNREFUSED;
                return;
        }

        ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
        if (ret) {
                /* Invalid challenge parameters */
                chap->error = ret;
                goto fail2;
        }

        if (chap->ctrl_key_len) {
                dev_dbg(ctrl->device,
                        "%s: qid %d DH exponential\n",
                        __func__, chap->qid);
                ret = nvme_auth_dhchap_exponential(ctrl, chap);
                if (ret) {
                        chap->error = ret;
                        goto fail2;
                }
        }

        dev_dbg(ctrl->device, "%s: qid %d host response\n",
                __func__, chap->qid);
        mutex_lock(&ctrl->dhchap_auth_mutex);
        ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
        if (ret) {
                mutex_unlock(&ctrl->dhchap_auth_mutex);
                chap->error = ret;
                goto fail2;
        }
        mutex_unlock(&ctrl->dhchap_auth_mutex);

        /* DH-HMAC-CHAP Step 3: send reply */
        dev_dbg(ctrl->device, "%s: qid %d send reply\n",
                __func__, chap->qid);
        ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
        if (ret < 0) {
                chap->error = ret;
                goto fail2;
        }

        tl = ret;
        ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
        if (ret) {
                chap->error = ret;
                goto fail2;
        }

        /* DH-HMAC-CHAP Step 4: receive success1 */
        dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
                __func__, chap->qid);

        memset(chap->buf, 0, CHAP_BUF_SIZE);
        ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
                               false);
        if (ret) {
                dev_warn(ctrl->device,
                         "qid %d failed to receive success1, %s %d\n",
                         chap->qid, ret < 0 ? "error" : "nvme status", ret);
                chap->error = ret;
                return;
        }
        ret = nvme_auth_receive_validate(ctrl, chap->qid,
                                         chap->buf, chap->transaction,
                                         NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
        if (ret) {
                chap->status = ret;
                chap->error = -ECONNREFUSED;
                return;
        }

        mutex_lock(&ctrl->dhchap_auth_mutex);
        if (ctrl->ctrl_key) {
                dev_dbg(ctrl->device,
                        "%s: qid %d controller response\n",
                        __func__, chap->qid);
                ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
                if (ret) {
                        mutex_unlock(&ctrl->dhchap_auth_mutex);
                        chap->error = ret;
                        goto fail2;
                }
        }
        mutex_unlock(&ctrl->dhchap_auth_mutex);

        ret = nvme_auth_process_dhchap_success1(ctrl, chap);
        if (ret) {
                /* Controller authentication failed */
                chap->error = -ECONNREFUSED;
                goto fail2;
        }

        if (chap->ctrl_key) {
                /* DH-HMAC-CHAP Step 5: send success2 */
                dev_dbg(ctrl->device, "%s: qid %d send success2\n",
                        __func__, chap->qid);
                tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
                ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
                if (ret)
                        chap->error = ret;
        }
        if (!ret) {
                chap->error = 0;
                return;
        }

fail2:
        dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
                __func__, chap->qid, chap->status);
        tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
        ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
        /*
         * Only update the error if sending failure2 failed and no other
         * error had been set during authentication.
         */
        if (ret && !chap->error)
                chap->error = ret;
}

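/*
 * Kick off DH-HMAC-CHAP negotiation for a single queue. Callers typically
 * pair this with nvme_auth_wait() to collect the result, roughly:
 *
 *      ret = nvme_auth_negotiate(ctrl, qid);
 *      if (!ret)
 *              ret = nvme_auth_wait(ctrl, qid);
 *
 * (an illustrative sketch only; real callers add their own error handling).
 * A host key must be present, and a configured controller secret must have
 * yielded a usable controller key.
 */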
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
        struct nvme_dhchap_queue_context *chap;

        if (!ctrl->host_key) {
                dev_warn(ctrl->device, "qid %d: no key\n", qid);
                return -ENOKEY;
        }

        if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
                dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
                return -ENOKEY;
        }

        chap = &ctrl->dhchap_ctxs[qid];
        cancel_work_sync(&chap->auth_work);
        queue_work(nvme_auth_wq, &chap->auth_work);
        return 0;
}
EXPORT_SYMBOL_GPL(nvme_auth_negotiate);

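/*
 * Wait for a previously queued negotiation on @qid to finish, return its
 * result and clear the sensitive per-queue state.
 */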
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
        struct nvme_dhchap_queue_context *chap;
        int ret;

        chap = &ctrl->dhchap_ctxs[qid];
        flush_work(&chap->auth_work);
        ret = chap->error;
        /* clear sensitive info */
        nvme_auth_reset_dhchap(chap);
        return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_wait);

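/*
 * Controller-wide re-authentication: authenticate the admin queue first,
 * then kick off negotiations for all I/O queues in parallel and collect
 * the results.
 */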
static void nvme_ctrl_auth_work(struct work_struct *work)
{
        struct nvme_ctrl *ctrl =
                container_of(work, struct nvme_ctrl, dhchap_auth_work);
        int ret, q;

        /*
         * If the ctrl is not connected, bail out as reconnect will handle
         * authentication.
         */
        if (ctrl->state != NVME_CTRL_LIVE)
                return;

        /* Authenticate admin queue first */
        ret = nvme_auth_negotiate(ctrl, 0);
        if (ret) {
                dev_warn(ctrl->device,
                         "qid 0: error %d setting up authentication\n", ret);
                return;
        }
        ret = nvme_auth_wait(ctrl, 0);
        if (ret) {
                dev_warn(ctrl->device,
                         "qid 0: authentication failed\n");
                return;
        }

        for (q = 1; q < ctrl->queue_count; q++) {
                ret = nvme_auth_negotiate(ctrl, q);
                if (ret) {
                        dev_warn(ctrl->device,
                                 "qid %d: error %d setting up authentication\n",
                                 q, ret);
                        break;
                }
        }

        /*
         * Failure is a soft-state; credentials remain valid until
         * the controller terminates the connection.
         */
        for (q = 1; q < ctrl->queue_count; q++) {
                ret = nvme_auth_wait(ctrl, q);
                if (ret)
                        dev_warn(ctrl->device,
                                 "qid %d: authentication failed\n", q);
        }
}

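/*
 * Derive the host and controller keys from the connect options and
 * allocate one queue context per possible queue; nvme_auth_free() undoes
 * this.
 */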
int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
        struct nvme_dhchap_queue_context *chap;
        int i, ret;

        mutex_init(&ctrl->dhchap_auth_mutex);
        INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
        if (!ctrl->opts)
                return 0;
        ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret,
                        &ctrl->host_key);
        if (ret)
                return ret;
        ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret,
                        &ctrl->ctrl_key);
        if (ret)
                goto err_free_dhchap_secret;

        if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
                return 0;

        ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl),
                                sizeof(*chap), GFP_KERNEL);
        if (!ctrl->dhchap_ctxs) {
                ret = -ENOMEM;
                goto err_free_dhchap_ctrl_secret;
        }

        for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) {
                chap = &ctrl->dhchap_ctxs[i];
                chap->qid = i;
                chap->ctrl = ctrl;
                INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
        }

        return 0;
err_free_dhchap_ctrl_secret:
        nvme_auth_free_key(ctrl->ctrl_key);
        ctrl->ctrl_key = NULL;
err_free_dhchap_secret:
        nvme_auth_free_key(ctrl->host_key);
        ctrl->host_key = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);

void nvme_auth_stop(struct nvme_ctrl *ctrl)
{
        cancel_work_sync(&ctrl->dhchap_auth_work);
}
EXPORT_SYMBOL_GPL(nvme_auth_stop);

void nvme_auth_free(struct nvme_ctrl *ctrl)
{
        int i;

        if (ctrl->dhchap_ctxs) {
                for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
                        nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
                kfree(ctrl->dhchap_ctxs);
        }
        if (ctrl->host_key) {
                nvme_auth_free_key(ctrl->host_key);
                ctrl->host_key = NULL;
        }
        if (ctrl->ctrl_key) {
                nvme_auth_free_key(ctrl->ctrl_key);
                ctrl->ctrl_key = NULL;
        }
}
EXPORT_SYMBOL_GPL(nvme_auth_free);

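/*
 * Module setup: an unbound, memory-reclaim-safe workqueue for the per-queue
 * auth work, plus a slab cache and mempool (16 preallocated elements) for
 * the 4k negotiation buffers.
 */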
int __init nvme_init_auth(void)
{
        nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
                               WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
        if (!nvme_auth_wq)
                return -ENOMEM;

        nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
                                CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
        if (!nvme_chap_buf_cache)
                goto err_destroy_workqueue;

        nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
                        mempool_free_slab, nvme_chap_buf_cache);
        if (!nvme_chap_buf_pool)
                goto err_destroy_chap_buf_cache;

        return 0;
err_destroy_chap_buf_cache:
        kmem_cache_destroy(nvme_chap_buf_cache);
err_destroy_workqueue:
        destroy_workqueue(nvme_auth_wq);
        return -ENOMEM;
}

void __exit nvme_exit_auth(void)
{
        mempool_destroy(nvme_chap_buf_pool);
        kmem_cache_destroy(nvme_chap_buf_cache);
        destroy_workqueue(nvme_auth_wq);
}