/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Perf Linux driver
 */
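
/*
 * Summary of what the code below does: each test thread fills a local
 * source buffer and streams a configurable amount of data into the
 * peer's NTB memory window, either with memcpy_toio() or through a DMA
 * engine channel (use_dma=1).  Per-thread byte counts and elapsed time
 * are reported through debugfs.
 */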
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/ntb.h>
#include <linux/mutex.h>
#define DRIVER_NAME		"ntb_perf"
#define DRIVER_DESCRIPTION	"PCIe NTB Performance Measurement Tool"

#define DRIVER_LICENSE		"Dual BSD/GPL"
#define DRIVER_VERSION		"1.0"
#define DRIVER_AUTHOR		"Dave Jiang <dave.jiang@intel.com>"

#define PERF_LINK_DOWN_TIMEOUT	10
#define PERF_VERSION		0xffff0001
#define MAX_THREADS		32
#define MAX_TEST_SIZE		SZ_1M
#define MAX_SRCS		32

#define DMA_OUT_RESOURCE_TO	msecs_to_jiffies(50)
#define DMA_RETRIES		20
#define SZ_4G			(1ULL << 32)
#define MAX_SEG_ORDER		20	/* no larger than 1M for kmalloc buffer */
#define PIDX			NTB_DEF_PEER_IDX
MODULE_LICENSE(DRIVER_LICENSE);
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
static struct dentry *perf_debugfs_dir;

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int seg_order = 19; /* 512K */
module_param(seg_order, uint, 0644);
MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing");

static unsigned int run_order = 32; /* 4G */
module_param(run_order, uint, 0644);
MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer");

static bool use_dma; /* default to false */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use a DMA engine to measure performance");

static bool on_node = true; /* default to true */
module_param(on_node, bool, 0644);
MODULE_PARM_DESC(on_node, "Run threads only on NTB device node (default: true)");
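
/*
 * How the knobs combine (see ntb_perf_thread() below): each thread copies
 * total = 1 << run_order bytes in chunks of buf_size = 1 << seg_order bytes
 * (capped at MAX_TEST_SIZE), wrapping around inside the peer memory window.
 * With the defaults seg_order=19 and run_order=32, that is 512 KiB chunks
 * and 4 GiB per thread, i.e. 2^(32-19) = 8192 chunks.
 */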
struct perf_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	resource_size_t xlat_align;
	resource_size_t xlat_align_size;
	void __iomem *vbase;
	size_t xlat_size;
	size_t buf_size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct perf_ctx;

struct pthr_ctx {
	struct task_struct *thread;
	struct perf_ctx *perf;
	atomic_t dma_sync;
	struct dma_chan *dma_chan;
	int dma_prep_err;
	int src_idx;
	void *srcs[MAX_SRCS];
	wait_queue_head_t *wq;
	int status;
	u64 copied;
	u64 diff_us;
};

struct perf_ctx {
	struct ntb_dev *ntb;
	spinlock_t db_lock;
	struct perf_mw mw;
	bool link_is_up;
	struct delayed_work link_work;
	wait_queue_head_t link_wq;
	u8 perf_threads;
	/* mutex ensures only one set of threads run at once */
	struct mutex run_mutex;
	struct pthr_ctx pthr_ctx[MAX_THREADS];
	atomic_t tsync;
	atomic_t tdone;
};

enum {
	VERSION = 0,
	MW_SZ_HIGH,
	MW_SZ_LOW,
	MAX_SPAD
};
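
/*
 * Scratchpad layout used for the link handshake (see perf_link_work()):
 * each side writes PERF_VERSION and the high/low halves of its memory
 * window size into the peer's VERSION, MW_SZ_HIGH and MW_SZ_LOW
 * scratchpads, then reads its own scratchpads back.  Only when the
 * version matches and the inbound translation is programmed does
 * link_is_up become true and link_wq get woken.
 */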
static void perf_link_event(void *ctx)
{
	struct perf_ctx *perf = ctx;

	if (ntb_link_is_up(perf->ntb, NULL, NULL) == 1) {
		schedule_delayed_work(&perf->link_work, 2 * HZ);
	} else {
		dev_dbg(&perf->ntb->pdev->dev, "link down\n");

		if (!perf->link_is_up)
			cancel_delayed_work_sync(&perf->link_work);

		perf->link_is_up = false;
	}
}
static void perf_db_event(void *ctx, int vec)
{
	struct perf_ctx *perf = ctx;
	u64 db_bits, db_mask;

	db_mask = ntb_db_vector_mask(perf->ntb, vec);
	db_bits = ntb_db_read(perf->ntb);

	dev_dbg(&perf->ntb->dev, "doorbell vec %d mask %#llx bits %#llx\n",
		vec, db_mask, db_bits);
}
static const struct ntb_ctx_ops perf_ops = {
	.link_event = perf_link_event,
	.db_event = perf_db_event,
};
static void perf_copy_callback(void *data)
{
	struct pthr_ctx *pctx = data;

	atomic_dec(&pctx->dma_sync);
}
static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
			 char *src, size_t size)
{
	struct perf_ctx *perf = pctx->perf;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = pctx->dma_chan;
	struct dma_device *device;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	size_t src_off, dst_off;
	struct perf_mw *mw = &perf->mw;
	void __iomem *vbase;
	void __iomem *dst_vaddr;
	dma_addr_t dst_phys;
	int retries = 0;

	if (!use_dma) {
		memcpy_toio(dst, src, size);
		return size;
	}

	if (!chan) {
		dev_err(&perf->ntb->dev, "DMA engine does not exist\n");
		return -EINVAL;
	}

	device = chan->device;
	src_off = (uintptr_t)src & ~PAGE_MASK;
	dst_off = (uintptr_t __force)dst & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, src_off, dst_off, size))
		return -ENODEV;

	vbase = mw->vbase;
	dst_vaddr = dst;
	dst_phys = mw->phys_addr + (dst_vaddr - vbase);

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->len = size;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(src),
				      src_off, size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	do {
		txd = device->device_prep_dma_memcpy(chan, dst_phys,
						     unmap->addr[0],
						     size, DMA_PREP_INTERRUPT);
		if (!txd) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(DMA_OUT_RESOURCE_TO);
		}
	} while (!txd && (++retries < DMA_RETRIES));

	if (!txd) {
		pctx->dma_prep_err++;
		goto err_get_unmap;
	}

	txd->callback = perf_copy_callback;
	txd->callback_param = pctx;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	atomic_inc(&pctx->dma_sync);
	dma_async_issue_pending(chan);

	return size;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
	return 0;
}
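
/*
 * Note on the DMA path above: descriptor preparation is retried up to
 * DMA_RETRIES times, sleeping DMA_OUT_RESOURCE_TO jiffies between
 * attempts when the engine is out of descriptors.  Completion is
 * tracked by pctx->dma_sync, incremented here on submit and decremented
 * in perf_copy_callback(), so perf_move_data() can wait for all
 * outstanding copies before reading the clock.
 */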
static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
			  u64 buf_size, u64 win_size, u64 total)
{
	int chunks, total_chunks, i;
	int copied_chunks = 0;
	u64 copied = 0, result;
	char __iomem *tmp = dst;
	u64 perf, diff_us;
	ktime_t kstart, kstop, kdiff;
	unsigned long last_sleep = jiffies;

	chunks = div64_u64(win_size, buf_size);
	total_chunks = div64_u64(total, buf_size);
	kstart = ktime_get();

	for (i = 0; i < total_chunks; i++) {
		result = perf_copy(pctx, tmp, src, buf_size);
		copied += result;
		copied_chunks++;
		if (copied_chunks == chunks) {
			tmp = dst;
			copied_chunks = 0;
		} else
			tmp += buf_size;

		/* Yield at least every 5 seconds to avoid a soft lockup. */
		if (unlikely((jiffies - last_sleep) > 5 * HZ)) {
			last_sleep = jiffies;
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);
		}

		if (unlikely(kthread_should_stop()))
			break;
	}

	if (use_dma) {
		pr_debug("%s: All DMA descriptors submitted\n", current->comm);
		while (atomic_read(&pctx->dma_sync) != 0) {
			if (kthread_should_stop())
				break;
			msleep(20);
		}
	}

	kstop = ktime_get();
	kdiff = ktime_sub(kstop, kstart);
	diff_us = ktime_to_us(kdiff);

	pr_debug("%s: copied %llu bytes\n", current->comm, copied);
	pr_debug("%s: lasted %llu usecs\n", current->comm, diff_us);

	perf = div64_u64(copied, diff_us);

	pr_debug("%s: MBytes/s: %llu\n", current->comm, perf);

	pctx->copied = copied;
	pctx->diff_us = diff_us;

	return 0;
}
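
/*
 * Throughput math: div64_u64(copied, diff_us) is bytes per microsecond,
 * which equals (decimal) MBytes per second.  For example, 4 GiB
 * (4294967296 bytes) copied in 2,000,000 usecs reports 2147 MBytes/s.
 */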
static bool perf_dma_filter_fn(struct dma_chan *chan, void *node)
{
	/* Is the channel required to be on the same node as the device? */
	if (!on_node)
		return true;

	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}
static int ntb_perf_thread(void *data)
{
	struct pthr_ctx *pctx = data;
	struct perf_ctx *perf = pctx->perf;
	struct pci_dev *pdev = perf->ntb->pdev;
	struct perf_mw *mw = &perf->mw;
	char __iomem *dst;
	u64 win_size, buf_size, total;
	void *src;
	int rc, node, i;
	struct dma_chan *dma_chan = NULL;

	pr_debug("kthread %s starting...\n", current->comm);

	node = on_node ? dev_to_node(&pdev->dev) : NUMA_NO_NODE;

	if (use_dma && !pctx->dma_chan) {
		dma_cap_mask_t dma_mask;

		dma_cap_zero(dma_mask);
		dma_cap_set(DMA_MEMCPY, dma_mask);
		dma_chan = dma_request_channel(dma_mask, perf_dma_filter_fn,
					       (void *)(unsigned long)node);
		if (!dma_chan) {
			pr_warn("%s: cannot acquire DMA channel, quitting\n",
				current->comm);
			return -ENODEV;
		}
		pctx->dma_chan = dma_chan;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		pctx->srcs[i] = kmalloc_node(MAX_TEST_SIZE, GFP_KERNEL, node);
		if (!pctx->srcs[i]) {
			rc = -ENOMEM;
			goto err;
		}
	}

	win_size = mw->phys_size;
	buf_size = 1ULL << seg_order;
	total = 1ULL << run_order;

	if (buf_size > MAX_TEST_SIZE)
		buf_size = MAX_TEST_SIZE;

	dst = (char __iomem *)mw->vbase;

	atomic_inc(&perf->tsync);
	while (atomic_read(&perf->tsync) != perf->perf_threads)
		schedule();

	src = pctx->srcs[pctx->src_idx];
	pctx->src_idx = (pctx->src_idx + 1) & (MAX_SRCS - 1);

	rc = perf_move_data(pctx, dst, src, buf_size, win_size, total);

	atomic_dec(&perf->tsync);

	if (rc < 0) {
		pr_err("%s: failed\n", current->comm);
		rc = -ENXIO;
		goto err;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	atomic_inc(&perf->tdone);
	wake_up(pctx->wq);
	rc = 0;
	goto done;

err:
	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	if (dma_chan) {
		dma_release_channel(dma_chan);
		pctx->dma_chan = NULL;
	}

done:
	/* Wait until we are told to stop */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return rc;
}
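
/*
 * Thread synchronization: perf->tsync acts as a start barrier so all
 * threads begin copying at once; perf->tdone counts finished threads and
 * wakes the wait queue supplied by debugfs_run_write(), which then stops
 * the kthreads and releases run_mutex.
 */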
static void perf_free_mw(struct perf_ctx *perf)
{
	struct perf_mw *mw = &perf->mw;
	struct pci_dev *pdev = perf->ntb->pdev;

	if (!mw->virt_addr)
		return;

	ntb_mw_clear_trans(perf->ntb, PIDX, 0);
	dma_free_coherent(&pdev->dev, mw->buf_size,
			  mw->virt_addr, mw->dma_addr);
	mw->xlat_size = 0;
	mw->buf_size = 0;
	mw->virt_addr = NULL;
}
static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
{
	struct perf_mw *mw = &perf->mw;
	size_t xlat_size, buf_size;
	int rc;

	if (!size)
		return -EINVAL;

	xlat_size = round_up(size, mw->xlat_align_size);
	buf_size = round_up(size, mw->xlat_align);

	if (mw->xlat_size == xlat_size)
		return 0;

	if (mw->buf_size)
		perf_free_mw(perf);

	mw->xlat_size = xlat_size;
	mw->buf_size = buf_size;

	mw->virt_addr = dma_alloc_coherent(&perf->ntb->pdev->dev, buf_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr) {
		mw->xlat_size = 0;
		mw->buf_size = 0;
		return -ENOMEM;
	}

	rc = ntb_mw_set_trans(perf->ntb, PIDX, 0, mw->dma_addr, mw->xlat_size);
	if (rc) {
		dev_err(&perf->ntb->dev, "Unable to set mw0 translation\n");
		perf_free_mw(perf);
		return -EIO;
	}

	return 0;
}
static void perf_link_work(struct work_struct *work)
{
	struct perf_ctx *perf =
		container_of(work, struct perf_ctx, link_work.work);
	struct ntb_dev *ndev = perf->ntb;
	struct pci_dev *pdev = ndev->pdev;
	u32 val;
	u64 size;
	int rc;

	dev_dbg(&perf->ntb->pdev->dev, "%s called\n", __func__);

	size = perf->mw.phys_size;

	if (max_mw_size && size > max_mw_size)
		size = max_mw_size;

	ntb_peer_spad_write(ndev, PIDX, MW_SZ_HIGH, upper_32_bits(size));
	ntb_peer_spad_write(ndev, PIDX, MW_SZ_LOW, lower_32_bits(size));
	ntb_peer_spad_write(ndev, PIDX, VERSION, PERF_VERSION);

	/* now read what the peer wrote */
	val = ntb_spad_read(ndev, VERSION);
	if (val != PERF_VERSION) {
		dev_dbg(&pdev->dev, "Remote version = %#x\n", val);
		goto out;
	}

	val = ntb_spad_read(ndev, MW_SZ_HIGH);
	size = (u64)val << 32;

	val = ntb_spad_read(ndev, MW_SZ_LOW);
	size |= val;

	dev_dbg(&pdev->dev, "Remote MW size = %#llx\n", size);

	rc = perf_set_mw(perf, size);
	if (rc)
		goto out1;

	perf->link_is_up = true;
	wake_up(&perf->link_wq);

	return;

out1:
	perf_free_mw(perf);

out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&perf->link_work,
				      msecs_to_jiffies(PERF_LINK_DOWN_TIMEOUT));
}
static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf)
{
	struct perf_mw *mw;
	int rc;

	mw = &perf->mw;

	rc = ntb_mw_get_align(ntb, PIDX, 0, &mw->xlat_align,
			      &mw->xlat_align_size, NULL);
	if (rc)
		return rc;

	rc = ntb_peer_mw_get_addr(ntb, 0, &mw->phys_addr, &mw->phys_size);
	if (rc)
		return rc;

	perf->mw.vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
	if (!mw->vbase)
		return -ENOMEM;

	return 0;
}
static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct perf_ctx *perf = filp->private_data;
	char *buf;
	ssize_t ret, out_off = 0;
	struct pthr_ctx *pctx;
	int i;
	u64 rate;

	if (!perf)
		return 0;

	buf = kmalloc(1024, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (mutex_is_locked(&perf->run_mutex)) {
		out_off = scnprintf(buf, 64, "running\n");
		goto read_from_buf;
	}

	for (i = 0; i < MAX_THREADS; i++) {
		pctx = &perf->pthr_ctx[i];

		if (pctx->status == -ENODATA)
			break;

		if (pctx->status) {
			out_off += scnprintf(buf + out_off, 1024 - out_off,
					     "%d: error %d\n", i,
					     pctx->status);
			continue;
		}

		rate = div64_u64(pctx->copied, pctx->diff_us);
		out_off += scnprintf(buf + out_off, 1024 - out_off,
			"%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
			i, pctx->copied, pctx->diff_us, rate);
	}

read_from_buf:
	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_off);
	kfree(buf);

	return ret;
}
static void threads_cleanup(struct perf_ctx *perf)
{
	struct pthr_ctx *pctx;
	int i;

	for (i = 0; i < MAX_THREADS; i++) {
		pctx = &perf->pthr_ctx[i];
		if (pctx->thread) {
			pctx->status = kthread_stop(pctx->thread);
			pctx->thread = NULL;
		}
	}
}
static void perf_clear_thread_status(struct perf_ctx *perf)
{
	int i;

	for (i = 0; i < MAX_THREADS; i++)
		perf->pthr_ctx[i].status = -ENODATA;
}
static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct perf_ctx *perf = filp->private_data;
	int node, i;
	DECLARE_WAIT_QUEUE_HEAD(wq);

	if (wait_event_interruptible(perf->link_wq, perf->link_is_up))
		return -ENOLINK;

	if (perf->perf_threads == 0)
		return -EINVAL;

	if (!mutex_trylock(&perf->run_mutex))
		return -EBUSY;

	perf_clear_thread_status(perf);

	if (perf->perf_threads > MAX_THREADS) {
		perf->perf_threads = MAX_THREADS;
		pr_info("Reset total threads to: %u\n", MAX_THREADS);
	}

	/* no greater than 1M */
	if (seg_order > MAX_SEG_ORDER) {
		seg_order = MAX_SEG_ORDER;
		pr_info("Fix seg_order to %u\n", seg_order);
	}

	if (run_order < seg_order) {
		run_order = seg_order;
		pr_info("Fix run_order to %u\n", run_order);
	}

	node = on_node ? dev_to_node(&perf->ntb->pdev->dev)
		       : NUMA_NO_NODE;
	atomic_set(&perf->tdone, 0);

	/* launch kernel threads */
	for (i = 0; i < perf->perf_threads; i++) {
		struct pthr_ctx *pctx;

		pctx = &perf->pthr_ctx[i];
		atomic_set(&pctx->dma_sync, 0);
		pctx->perf = perf;
		pctx->wq = &wq;
		pctx->thread =
			kthread_create_on_node(ntb_perf_thread,
					       (void *)pctx,
					       node, "ntb_perf %d", i);
		if (IS_ERR(pctx->thread)) {
			pctx->thread = NULL;
			goto err;
		} else {
			wake_up_process(pctx->thread);
		}
	}

	wait_event_interruptible(wq,
		atomic_read(&perf->tdone) == perf->perf_threads);

	threads_cleanup(perf);
	mutex_unlock(&perf->run_mutex);
	return count;

err:
	threads_cleanup(perf);
	mutex_unlock(&perf->run_mutex);
	return -ENXIO;
}
static const struct file_operations ntb_perf_debugfs_run = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_run_read,
	.write = debugfs_run_write,
};
static int perf_debugfs_setup(struct perf_ctx *perf)
{
	struct pci_dev *pdev = perf->ntb->pdev;
	struct dentry *debugfs_node_dir;
	struct dentry *debugfs_run;
	struct dentry *debugfs_threads;

	if (!debugfs_initialized())
		return -ENODEV;

	/* Assumption: only one NTB device in the system */
	if (!perf_debugfs_dir) {
		perf_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
		if (!perf_debugfs_dir)
			return -ENODEV;
	}

	debugfs_node_dir = debugfs_create_dir(pci_name(pdev),
					      perf_debugfs_dir);
	if (!debugfs_node_dir)
		goto err;

	debugfs_run = debugfs_create_file("run", S_IRUSR | S_IWUSR,
					  debugfs_node_dir, perf,
					  &ntb_perf_debugfs_run);
	if (!debugfs_run)
		goto err;

	debugfs_threads = debugfs_create_u8("threads", S_IRUSR | S_IWUSR,
					    debugfs_node_dir,
					    &perf->perf_threads);
	if (!debugfs_threads)
		goto err;

	return 0;

err:
	debugfs_remove_recursive(perf_debugfs_dir);
	perf_debugfs_dir = NULL;
	return -ENODEV;
}
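
/*
 * Example usage from user space (assuming debugfs is mounted at
 * /sys/kernel/debug; the per-device directory is named after the PCI
 * device):
 *
 *   echo 4 > /sys/kernel/debug/ntb_perf/<pci-dev>/threads
 *   echo 1 > /sys/kernel/debug/ntb_perf/<pci-dev>/run
 *   cat /sys/kernel/debug/ntb_perf/<pci-dev>/run
 *
 * The write blocks until all threads finish; the read then prints the
 * per-thread "copied ... bytes in ... usecs, ... MBytes/s" results.
 */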
static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct pci_dev *pdev = ntb->pdev;
	struct perf_ctx *perf;
	int node;
	int rc = 0;

	if (ntb_spad_count(ntb) < MAX_SPAD) {
		dev_err(&ntb->dev, "Not enough scratch pad registers for %s",
			DRIVER_NAME);
		return -EIO;
	}

	if (!ntb->ops->mw_set_trans) {
		dev_err(&ntb->dev, "Need inbound MW based NTB API\n");
		return -EINVAL;
	}

	if (ntb_peer_port_count(ntb) != NTB_DEF_PEER_CNT)
		dev_warn(&ntb->dev, "Multi-port NTB devices unsupported\n");

	node = on_node ? dev_to_node(&pdev->dev) : NUMA_NO_NODE;
	perf = kzalloc_node(sizeof(*perf), GFP_KERNEL, node);
	if (!perf) {
		rc = -ENOMEM;
		goto err_perf;
	}

	perf->ntb = ntb;
	perf->perf_threads = 1;
	atomic_set(&perf->tsync, 0);
	mutex_init(&perf->run_mutex);
	spin_lock_init(&perf->db_lock);
	perf_setup_mw(ntb, perf);
	init_waitqueue_head(&perf->link_wq);
	INIT_DELAYED_WORK(&perf->link_work, perf_link_work);

	rc = ntb_set_ctx(ntb, perf, &perf_ops);
	if (rc)
		goto err_ctx;

	perf->link_is_up = false;
	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ntb);

	rc = perf_debugfs_setup(perf);
	if (rc)
		goto err_ctx;

	perf_clear_thread_status(perf);

	return 0;

err_ctx:
	cancel_delayed_work_sync(&perf->link_work);
	kfree(perf);
err_perf:
	return rc;
}
static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct perf_ctx *perf = ntb->ctx;
	int i;

	dev_dbg(&perf->ntb->dev, "%s called\n", __func__);

	mutex_lock(&perf->run_mutex);

	cancel_delayed_work_sync(&perf->link_work);

	ntb_clear_ctx(ntb);
	ntb_link_disable(ntb);

	debugfs_remove_recursive(perf_debugfs_dir);
	perf_debugfs_dir = NULL;

	if (use_dma) {
		for (i = 0; i < MAX_THREADS; i++) {
			struct pthr_ctx *pctx = &perf->pthr_ctx[i];

			if (pctx->dma_chan)
				dma_release_channel(pctx->dma_chan);
		}
	}

	kfree(perf);
}
static struct ntb_client perf_client = {
	.ops = {
		.probe = perf_probe,
		.remove = perf_remove,
	},
};
module_ntb_client(perf_client);