net/mlx5: Add Fast teardown support
drivers/net/ethernet/mellanox/mlx5/core/fw.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/eswitch.h>
#include <linux/module.h>
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"

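/* Issue QUERY_ADAPTER and hand back the raw output mailbox; the callers
 * below pull the board id (PSID) and the IEEE vendor id out of it.
 */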
static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
                                  int outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0};

        MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}

int mlx5_query_board_id(struct mlx5_core_dev *dev)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
        int err;

        out = kzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        err = mlx5_cmd_query_adapter(dev, out, outlen);
        if (err)
                goto out;

        memcpy(dev->board_id,
               MLX5_ADDR_OF(query_adapter_out, out,
                            query_adapter_struct.vsd_contd_psid),
               MLX5_FLD_SZ_BYTES(query_adapter_out,
                                 query_adapter_struct.vsd_contd_psid));

out:
        kfree(out);
        return err;
}

int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
        int err;

        out = kzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        err = mlx5_cmd_query_adapter(mdev, out, outlen);
        if (err)
                goto out;

        *vendor_id = MLX5_GET(query_adapter_out, out,
                              query_adapter_struct.ieee_vendor_id);
out:
        kfree(out);
        return err;
}
EXPORT_SYMBOL(mlx5_core_query_vendor_id);

static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
{
        return mlx5_query_pcam_reg(dev, dev->caps.pcam,
                                   MLX5_PCAM_FEATURE_ENHANCED_FEATURES,
                                   MLX5_PCAM_REGS_5000_TO_507F);
}

static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
{
        return mlx5_query_mcam_reg(dev, dev->caps.mcam,
                                   MLX5_MCAM_FEATURE_ENHANCED_FEATURES,
                                   MLX5_MCAM_REGS_FIRST_128);
}

static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
{
        return mlx5_query_qcam_reg(dev, dev->caps.qcam,
                                   MLX5_QCAM_FEATURE_ENHANCED_FEATURES,
                                   MLX5_QCAM_REGS_FIRST_128);
}

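/* Fetch the general HCA capabilities, then pull in the per-feature
 * capability sets (offloads, ODP, atomics, RoCE, flow tables, eswitch,
 * vector calc, QoS, device memory) for every feature the device
 * advertises.  The debug, PCAM, MCAM and QCAM queries are best-effort:
 * their return values are not checked.
 */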
int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
        int err;

        err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
        if (err)
                return err;

        if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_IPOIB_ENHANCED_OFFLOADS);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, pg)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, atomic)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, roce)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, nic_flow_table) ||
            MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, vport_group_manager) &&
            MLX5_ESWITCH_MANAGER(dev)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
                if (err)
                        return err;
        }

        if (MLX5_ESWITCH_MANAGER(dev)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, vector_calc)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_VECTOR_CALC);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, qos)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, debug))
                mlx5_core_get_caps(dev, MLX5_CAP_DEBUG);

        if (MLX5_CAP_GEN(dev, pcam_reg))
                mlx5_get_pcam_reg(dev);

        if (MLX5_CAP_GEN(dev, mcam_reg))
                mlx5_get_mcam_reg(dev);

        if (MLX5_CAP_GEN(dev, qcam_reg))
                mlx5_get_qcam_reg(dev);

        if (MLX5_CAP_GEN(dev, device_memory)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_MEM);
                if (err)
                        return err;
        }

        return 0;
}

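/* INIT_HCA takes the device out of the reset state.  When the firmware
 * exposes the sw_owner_id capability, the four 32-bit words of the
 * caller-supplied software owner id are copied into the command.
 */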
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
{
        u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(init_hca_in)]   = {0};
        int i;

        MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);

        if (MLX5_CAP_GEN(dev, sw_owner_id)) {
                for (i = 0; i < 4; i++)
                        MLX5_ARRAY_SET(init_hca_in, in, sw_owner_id, i,
                                       sw_owner_id[i]);
        }

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
{
        u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(teardown_hca_in)]   = {0};

        MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

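/* Force teardown: TEARDOWN_HCA with the FORCE_CLOSE profile asks the
 * firmware to close the function even though resources may still be
 * open.  The command runs in polling mode (mlx5_cmd_exec_polling)
 * rather than over command completion events, and on failure the
 * caller falls back to the normal teardown flow (see the warning
 * message below).
 */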
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
{
        u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
        int force_state;
        int ret;

        if (!MLX5_CAP_GEN(dev, force_teardown)) {
                mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
                return -EOPNOTSUPP;
        }

        MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
        MLX5_SET(teardown_hca_in, in, profile, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE);

        ret = mlx5_cmd_exec_polling(dev, in, sizeof(in), out, sizeof(out));
        if (ret)
                return ret;

        force_state = MLX5_GET(teardown_hca_out, out, state);
        if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
                mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n");
                return -EIO;
        }

        return 0;
}

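/* Fast teardown, the flow added by this patch: ask the firmware to
 * prepare for a fast teardown using the PREPARE_FAST_TEARDOWN profile,
 * then request the disabled NIC interface state and poll for up to
 * MLX5_FAST_TEARDOWN_WAIT_MS (3 seconds) until the device reports
 * MLX5_NIC_IFC_DISABLED.
 */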
#define MLX5_FAST_TEARDOWN_WAIT_MS   3000
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
{
        unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
        u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
        int state;
        int ret;

        if (!MLX5_CAP_GEN(dev, fast_teardown)) {
                mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n");
                return -EOPNOTSUPP;
        }

        MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
        MLX5_SET(teardown_hca_in, in, profile,
                 MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);

        ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (ret)
                return ret;

        state = MLX5_GET(teardown_hca_out, out, state);
        if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
                mlx5_core_warn(dev, "teardown with fast mode failed\n");
                return -EIO;
        }

        mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);

        /* Loop until device state turns to disable */
        end = jiffies + msecs_to_jiffies(delay_ms);
        do {
                if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
                        break;

                cond_resched();
        } while (!time_after(jiffies, end));

        if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
                dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
                        mlx5_get_nic_state(dev), delay_ms);
                return -EIO;
        }

        return 0;
}

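/* Firmware flashing support.  The MCQI, MCC and MCDA access registers
 * implement the firmware-update state machine: query component info,
 * lock an update handle, download data blocks, then verify and
 * activate.  The enum below mirrors the MCC instruction encoding (its
 * name comes from the mlxsw definitions), and the helpers that follow
 * are thin wrappers used by the common mlxfw flashing code.
 */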
enum mlxsw_reg_mcc_instruction {
        MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
        MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
        MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
        MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
        MLX5_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
        MLX5_REG_MCC_INSTRUCTION_CANCEL = 0x08,
};

static int mlx5_reg_mcc_set(struct mlx5_core_dev *dev,
                            enum mlxsw_reg_mcc_instruction instr,
                            u16 component_index, u32 update_handle,
                            u32 component_size)
{
        u32 out[MLX5_ST_SZ_DW(mcc_reg)];
        u32 in[MLX5_ST_SZ_DW(mcc_reg)];

        memset(in, 0, sizeof(in));

        MLX5_SET(mcc_reg, in, instruction, instr);
        MLX5_SET(mcc_reg, in, component_index, component_index);
        MLX5_SET(mcc_reg, in, update_handle, update_handle);
        MLX5_SET(mcc_reg, in, component_size, component_size);

        return mlx5_core_access_reg(dev, in, sizeof(in), out,
                                    sizeof(out), MLX5_REG_MCC, 0, 1);
}

static int mlx5_reg_mcc_query(struct mlx5_core_dev *dev,
                              u32 *update_handle, u8 *error_code,
                              u8 *control_state)
{
        u32 out[MLX5_ST_SZ_DW(mcc_reg)];
        u32 in[MLX5_ST_SZ_DW(mcc_reg)];
        int err;

        memset(in, 0, sizeof(in));
        memset(out, 0, sizeof(out));
        MLX5_SET(mcc_reg, in, update_handle, *update_handle);

        err = mlx5_core_access_reg(dev, in, sizeof(in), out,
                                   sizeof(out), MLX5_REG_MCC, 0, 0);
        if (err)
                goto out;

        *update_handle = MLX5_GET(mcc_reg, out, update_handle);
        *error_code = MLX5_GET(mcc_reg, out, error_code);
        *control_state = MLX5_GET(mcc_reg, out, control_state);

out:
        return err;
}

static int mlx5_reg_mcda_set(struct mlx5_core_dev *dev,
                             u32 update_handle,
                             u32 offset, u16 size,
                             u8 *data)
{
        int err, in_size = MLX5_ST_SZ_BYTES(mcda_reg) + size;
        u32 out[MLX5_ST_SZ_DW(mcda_reg)];
        int i, j, dw_size = size >> 2;
        __be32 data_element;
        u32 *in;

        in = kzalloc(in_size, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        MLX5_SET(mcda_reg, in, update_handle, update_handle);
        MLX5_SET(mcda_reg, in, offset, offset);
        MLX5_SET(mcda_reg, in, size, size);

        for (i = 0; i < dw_size; i++) {
                j = i * 4;
                data_element = htonl(*(u32 *)&data[j]);
                memcpy(MLX5_ADDR_OF(mcda_reg, in, data) + j, &data_element, 4);
        }

        err = mlx5_core_access_reg(dev, in, in_size, out,
                                   sizeof(out), MLX5_REG_MCDA, 0, 1);
        kfree(in);
        return err;
}

static int mlx5_reg_mcqi_query(struct mlx5_core_dev *dev,
                               u16 component_index,
                               u32 *max_component_size,
                               u8 *log_mcda_word_size,
                               u16 *mcda_max_write_size)
{
        u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_ST_SZ_DW(mcqi_cap)];
        int offset = MLX5_ST_SZ_DW(mcqi_reg);
        u32 in[MLX5_ST_SZ_DW(mcqi_reg)];
        int err;

        memset(in, 0, sizeof(in));
        memset(out, 0, sizeof(out));

        MLX5_SET(mcqi_reg, in, component_index, component_index);
        MLX5_SET(mcqi_reg, in, data_size, MLX5_ST_SZ_BYTES(mcqi_cap));

        err = mlx5_core_access_reg(dev, in, sizeof(in), out,
                                   sizeof(out), MLX5_REG_MCQI, 0, 0);
        if (err)
                goto out;

        *max_component_size = MLX5_GET(mcqi_cap, out + offset, max_component_size);
        *log_mcda_word_size = MLX5_GET(mcqi_cap, out + offset, log_mcda_word_size);
        *mcda_max_write_size = MLX5_GET(mcqi_cap, out + offset, mcda_max_write_size);

out:
        return err;
}

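/* Glue between the generic mlxfw flashing engine and an mlx5 device:
 * every mlxfw_dev_ops callback recovers the mlx5_core_dev via
 * container_of() and issues the corresponding MCQI/MCC/MCDA register
 * access.
 */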
struct mlx5_mlxfw_dev {
        struct mlxfw_dev mlxfw_dev;
        struct mlx5_core_dev *mlx5_core_dev;
};

static int mlx5_component_query(struct mlxfw_dev *mlxfw_dev,
                                u16 component_index, u32 *p_max_size,
                                u8 *p_align_bits, u16 *p_max_write_size)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

        return mlx5_reg_mcqi_query(dev, component_index, p_max_size,
                                   p_align_bits, p_max_write_size);
}

static int mlx5_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
        u8 control_state, error_code;
        int err;

        *fwhandle = 0;
        err = mlx5_reg_mcc_query(dev, fwhandle, &error_code, &control_state);
        if (err)
                return err;

        if (control_state != MLXFW_FSM_STATE_IDLE)
                return -EBUSY;

        return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
                                0, *fwhandle, 0);
}

static int mlx5_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
                                     u16 component_index, u32 component_size)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

        return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
                                component_index, fwhandle, component_size);
}

static int mlx5_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
                                   u8 *data, u16 size, u32 offset)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

        return mlx5_reg_mcda_set(dev, fwhandle, offset, size, data);
}

static int mlx5_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
                                     u16 component_index)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

        return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
                                component_index, fwhandle, 0);
}

static int mlx5_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

        return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_ACTIVATE, 0,
                                fwhandle, 0);
}

static int mlx5_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
                                enum mlxfw_fsm_state *fsm_state,
                                enum mlxfw_fsm_state_err *fsm_state_err)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
        u8 control_state, error_code;
        int err;

        err = mlx5_reg_mcc_query(dev, &fwhandle, &error_code, &control_state);
        if (err)
                return err;

        *fsm_state = control_state;
        *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
                               MLXFW_FSM_STATE_ERR_MAX);
        return 0;
}

static void mlx5_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

        mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
}

static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

        mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
                         fwhandle, 0);
}

static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
        .component_query        = mlx5_component_query,
        .fsm_lock               = mlx5_fsm_lock,
        .fsm_component_update   = mlx5_fsm_component_update,
        .fsm_block_download     = mlx5_fsm_block_download,
        .fsm_component_verify   = mlx5_fsm_component_verify,
        .fsm_activate           = mlx5_fsm_activate,
        .fsm_query_state        = mlx5_fsm_query_state,
        .fsm_cancel             = mlx5_fsm_cancel,
        .fsm_release            = mlx5_fsm_release
};

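/* Entry point for flashing a firmware image.  Flashing requires the
 * MCQI, MCC and MCDA registers to be reported in MCAM; otherwise
 * -EOPNOTSUPP is returned.  The device PSID (board_id) is handed to
 * mlxfw together with the ops, presumably so the image can be matched
 * against the running device.
 */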
int mlx5_firmware_flash(struct mlx5_core_dev *dev,
                        const struct firmware *firmware)
{
        struct mlx5_mlxfw_dev mlx5_mlxfw_dev = {
                .mlxfw_dev = {
                        .ops = &mlx5_mlxfw_dev_ops,
                        .psid = dev->board_id,
                        .psid_size = strlen(dev->board_id),
                },
                .mlx5_core_dev = dev
        };

        if (!MLX5_CAP_GEN(dev, mcam_reg)  ||
            !MLX5_CAP_MCAM_REG(dev, mcqi) ||
            !MLX5_CAP_MCAM_REG(dev, mcc)  ||
            !MLX5_CAP_MCAM_REG(dev, mcda)) {
                pr_info("%s flashing isn't supported by the running FW\n", __func__);
                return -EOPNOTSUPP;
        }

        return mlxfw_firmware_flash(&mlx5_mlxfw_dev.mlxfw_dev, firmware);
}