unix {{
{unix}
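  ## Write VPP log messages to the file below; "full-coredump" asks the kernel
  ## to include all mapped memory regions in a core dump if VPP crashes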
  log /tmp/vpp.log
  full-coredump
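  ## Expose the VPP CLI on the Unix socket below; it can then be reached with,
  ## for example, "vppctl -s /run/vpp/cli.sock"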
  cli-listen /run/vpp/cli.sock
}}

api-trace {{
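  ## Record binary API messages so they can be inspected or replayed when debugging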
  on
}}

cpu {{
{cpu}
  scheduler-policy fifo
  scheduler-priority 50

  ## In VPP there is one main thread and, optionally, the user can create worker thread(s)
  ## The main thread and worker thread(s) can be pinned to CPU core(s) manually or automatically

  ## Manual pinning of thread(s) to CPU core(s)

  ## Set logical CPU core where main thread runs
  # main-core 1

  ## Set logical CPU core(s) where worker threads are running
  # corelist-workers 2-3,18-19

  ## Automatic pinning of thread(s) to CPU core(s)

  ## Sets the number of CPU core(s) to be skipped (1 ... N-1)
  ## Skipped CPU core(s) are not used for pinning the main thread and worker thread(s).
  ## The main thread is automatically pinned to the first available CPU core and worker(s)
  ## are pinned to the next free CPU core(s) after the core assigned to the main thread
  # skip-cores 4

  ## Specify the number of workers to be created
  ## Workers are pinned to N consecutive CPU cores, skipping the "skip-cores" CPU core(s)
  ## and the main thread's CPU core
  # workers 2
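  ## For example, following the rules above, "skip-cores 1" together with "workers 2"
  ## would leave core 0 unused, pin the main thread to core 1, and pin the two
  ## workers to the next free cores (2 and 3)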

  ## Set scheduling policy and priority of main and worker threads

  ## Scheduling policy options are: other (SCHED_OTHER), batch (SCHED_BATCH)
  ## idle (SCHED_IDLE), fifo (SCHED_FIFO), rr (SCHED_RR)
  # scheduler-policy fifo

  ## Scheduling priority is used only for "real-time" policies (fifo and rr)
  ## and has to be in the range of priorities supported for a particular policy
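  ## (on Linux this range is typically 1-99 for fifo and rr)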
  # scheduler-priority 50
}}

dpdk {{
{devices}

  ## Change default settings for all interfaces
  # dev default {{
    ## Number of receive queues, enables RSS
    ## Default is 1
    # num-rx-queues 3

    ## Number of transmit queues. Default is equal
    ## to the number of worker threads, or 1 if there are no worker threads
    # num-tx-queues 3

    ## Number of descriptors in transmit and receive rings;
    ## increasing or reducing the number can impact performance
    ## Default is 1024 for both rx and tx
    # num-rx-desc 512
    # num-tx-desc 512

    ## VLAN strip offload mode for interface
    ## Default is off
    # vlan-strip-offload on
  # }}

  ## Whitelist specific interface by specifying PCI address
  # dev 0000:02:00.0

  ## Whitelist specific interface by specifying PCI address and in
  ## addition specify custom parameters for this interface
  # dev 0000:02:00.1 {{
    # num-rx-queues 2
  # }}

  ## Specify bonded interface and its slaves via PCI addresses
  ##
  ## Bonded interface in XOR load balance mode (mode 2) with L3 and L4 headers
  # vdev eth_bond0,mode=2,slave=0000:02:00.0,slave=0000:03:00.0,xmit_policy=l34
  # vdev eth_bond1,mode=2,slave=0000:02:00.1,slave=0000:03:00.1,xmit_policy=l34
  ##
  ## Bonded interface in Active-Backup mode (mode 1)
  # vdev eth_bond0,mode=1,slave=0000:02:00.0,slave=0000:03:00.0
  # vdev eth_bond1,mode=1,slave=0000:02:00.1,slave=0000:03:00.1

  ## Change the UIO driver used by VPP. Options are: igb_uio, vfio-pci
  ## and uio_pci_generic (default)
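  ## (note: vfio-pci generally requires an IOMMU to be enabled on the host)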
  # uio-driver vfio-pci

  ## Disable multi-segment buffers, improves performance but
  ## disables Jumbo MTU support
  # no-multi-seg

  ## Increase the number of buffers allocated, needed only in scenarios with a
  ## large number of interfaces and worker threads. Value is per CPU socket.
  ## Default is 16384
  # num-mbufs 128000

  ## Change hugepages allocation per-socket, needed only if there is a need for
  ## a larger number of mbufs. Default is 256M on each detected CPU socket
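  ## Values are megabytes per CPU socket, comma separated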
  # socket-mem 2048,2048
}}

# Adjust the plugin path depending on where the VPP plugins are:
#plugins
#{{
# path /home/bms/vpp/build-root/install-vpp-native/vpp/lib64/vpp_plugins
#}}

# Alternate syntax to choose plugin path
#plugin_path /home/bms/vpp/build-root/install-vpp-native/vpp/lib64/vpp_plugins

{tcp}