Marvin Liu | abd5669 | 2022-08-17 09:38:40 +0800 | [diff] [blame] | 1 | /* SPDX-License-Identifier: Apache-2.0 |
| 2 | * Copyright (c) 2022 Cisco Systems, Inc. |
| 3 | */ |
| 4 | |
| 5 | #include <vlib/vlib.h> |
| 6 | #include <vlib/physmem_funcs.h> |
| 7 | #include <vlib/dma/dma.h> |
| 8 | |
| 9 | static clib_error_t * |
| 10 | show_dma_backends_command_fn (vlib_main_t *vm, unformat_input_t *input, |
| 11 | vlib_cli_command_t *cmd) |
| 12 | { |
| 13 | vlib_dma_main_t *dm = &vlib_dma_main; |
| 14 | |
| 15 | if (vec_len (dm->backends)) |
| 16 | { |
| 17 | vlib_dma_backend_t *b; |
| 18 | vec_foreach (b, dm->backends) |
| 19 | vlib_cli_output (vm, "%s", b->name); |
| 20 | } |
| 21 | else |
| 22 | vlib_cli_output (vm, "No active DMA backends"); |
| 23 | |
| 24 | return 0; |
| 25 | } |
| 26 | |
| 27 | VLIB_CLI_COMMAND (avf_create_command, static) = { |
| 28 | .path = "show dma backends", |
| 29 | .short_help = "show dma backends", |
| 30 | .function = show_dma_backends_command_fn, |
| 31 | }; |
| 32 | |
| 33 | static void |
| 34 | test_dma_cb_fn (vlib_main_t *vm, vlib_dma_batch_t *b) |
| 35 | { |
| 36 | fformat (stderr, "%s: cb %p cookie %lx\n", __func__, b, |
| 37 | vlib_dma_batch_get_cookie (vm, b)); |
| 38 | } |
| 39 | |
| 40 | static clib_error_t * |
| 41 | fill_random_data (void *buffer, uword size) |
| 42 | { |
| 43 | uword seed = random_default_seed (); |
| 44 | |
| 45 | uword remain = size; |
| 46 | const uword p = clib_mem_get_page_size (); |
| 47 | uword offset = 0; |
| 48 | |
| 49 | clib_random_buffer_t rb; |
| 50 | clib_random_buffer_init (&rb, seed); |
| 51 | |
| 52 | while (remain > 0) |
| 53 | { |
| 54 | uword fill_size = clib_min (p, remain); |
| 55 | |
| 56 | clib_random_buffer_fill (&rb, fill_size); |
| 57 | void *rbuf = clib_random_buffer_get_data (&rb, fill_size); |
| 58 | clib_memcpy_fast (buffer + offset, rbuf, fill_size); |
| 59 | clib_random_buffer_free (&rb); |
| 60 | |
| 61 | offset += fill_size; |
| 62 | remain -= fill_size; |
| 63 | } |
| 64 | |
| 65 | return 0; |
| 66 | } |
| 67 | |
/* CLI handler for "test dma [transfers <x> size <x>]": allocate a DMA
 * config, fill a physmem region with random data, and submit one batch of
 * randomly-many copy transfers from the first half of the region to the
 * second half.  Completion is reported asynchronously via test_dma_cb_fn.
 *
 * NOTE(review): the physmem region and the DMA config are not released on
 * the success path — presumably intentional, since the batch completes
 * asynchronously after this function returns; confirm whether repeated
 * invocations are expected to leak.  The config also remains allocated if
 * the physmem allocation fails. */
static clib_error_t *
test_dma_command_fn (vlib_main_t *vm, unformat_input_t *input,
		     vlib_cli_command_t *cmd)
{
  clib_error_t *err = 0;
  vlib_dma_batch_t *b;
  int config_index = -1;
  u32 rsz, n_alloc, v;
  u8 *from = 0, *to = 0;
  /* Defaults: up to 256 transfers of up to 4 KiB each, completion routed to
   * the stderr-logging test callback. */
  vlib_dma_config_t cfg = { .max_transfers = 256,
			    .max_transfer_size = 4096,
			    .callback_fn = test_dma_cb_fn };

  /* Parse optional overrides for transfer count and size. */
  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "transfers %u", &v))
	cfg.max_transfers = v;
      else if (unformat (input, "size %u", &v))
	cfg.max_transfer_size = v;
      else
	return clib_error_return (0, "unknown input `%U'",
				  format_unformat_error, input);
    }

  if ((config_index = vlib_dma_config_add (vm, &cfg)) < 0)
    {
      err = clib_error_return (0, "Unable to allocate dma config");
      return err;
    }

  /* Round each transfer slot up to a cache-line multiple; the region holds
   * max_transfers source slots followed by max_transfers destination slots. */
  rsz = round_pow2 (cfg.max_transfer_size, CLIB_CACHE_LINE_BYTES);
  n_alloc = rsz * cfg.max_transfers * 2;

  if ((from = vlib_physmem_alloc_aligned_on_numa (
	 vm, n_alloc, CLIB_CACHE_LINE_BYTES, vm->numa_node)) == 0)
    {
      err = clib_error_return (0, "Unable to allocate %u bytes of physmem",
			       n_alloc);
      return err;
    }
  /* Destination half starts at the midpoint of the region. */
  to = from + n_alloc / 2;

  u32 port_allocator_seed;

  /* Randomize only the source half; return value is always 0, so ignored. */
  fill_random_data (from, (uword) cfg.max_transfers * rsz);

  b = vlib_dma_batch_new (vm, config_index);
  /* Cookie is echoed back by test_dma_cb_fn on completion. */
  vlib_dma_batch_set_cookie (vm, b, 0x12345678);

  /* Pick a random transfer count in [1, max_transfers - 1]; the cycle
   * counter seeds the PRNG so each invocation differs. */
  port_allocator_seed = clib_cpu_time_now ();
  int transfers = random_u32 (&port_allocator_seed) % cfg.max_transfers;
  if (!transfers)
    transfers = 1;
  for (int i = 0; i < transfers; i++)
    vlib_dma_batch_add (vm, b, to + i * rsz, from + i * rsz,
			cfg.max_transfer_size);

  vlib_dma_batch_submit (vm, b);
  return err;
}
| 128 | |
| 129 | static clib_error_t * |
| 130 | test_show_dma_fn (vlib_main_t *vm, unformat_input_t *input, |
| 131 | vlib_cli_command_t *cmd) |
| 132 | { |
| 133 | clib_error_t *err = 0; |
| 134 | int config_index = 0; |
| 135 | while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) |
| 136 | { |
| 137 | if (unformat (input, "config %u", &config_index)) |
| 138 | ; |
| 139 | else |
| 140 | return clib_error_return (0, "unknown input `%U'", |
| 141 | format_unformat_error, input); |
| 142 | } |
| 143 | |
| 144 | for (u32 i = 0; i < vlib_get_n_threads (); i++) |
| 145 | vlib_cli_output (vm, "Config %d %U", config_index, vlib_dma_config_info, |
| 146 | config_index, vlib_get_main_by_index (i)); |
| 147 | return err; |
| 148 | } |
| 149 | |
/* Registration for "test dma": submits a randomized DMA copy batch. */
VLIB_CLI_COMMAND (test_dma_command, static) = {
  .path = "test dma",
  .short_help = "test dma [transfers <x> size <x>]",
  .function = test_dma_command_fn,
};
| 155 | |
/* Registration for "show dma": prints per-thread DMA config state. */
VLIB_CLI_COMMAND (show_dma_command, static) = {
  .path = "show dma",
  .short_help = "show dma [config <x>]",
  .function = test_show_dma_fn,
};